title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
WIP: Make weekday_name field in DatetimeIndex categorical
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e944df7aa83c6..7853731537eb5 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -42,6 +42,7 @@ from pandas.tseries.frequencies import to_offset, get_period_alias, Resolution from pandas.core.indexes.datetimelike import ( DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin) +from pandas.core.indexes.category import CategoricalIndex from pandas.tseries.offsets import ( DateOffset, generate_range, Tick, CDay, prefix_mapping) @@ -55,8 +56,7 @@ from pandas._libs import (lib, index as libindex, tslib as libts, join as libjoin, Timestamp) from pandas._libs.tslibs import (timezones, conversion, fields, parsing, - ccalendar, - resolution as libresolution) + resolution as libresolution, ccalendar) # -------- some conversion wrapper functions @@ -2511,7 +2511,9 @@ def month_name(self, locale=None): result = fields.get_date_name_field(values, 'month_name', locale=locale) result = self._maybe_mask_results(result) - return Index(result, name=self.name) + return CategoricalIndex(result, ordered=True, + categories=ccalendar.MONTHS_FULL[1:], + name=self.name) def day_name(self, locale=None): """ @@ -2537,7 +2539,8 @@ def day_name(self, locale=None): result = fields.get_date_name_field(values, 'day_name', locale=locale) result = self._maybe_mask_results(result) - return Index(result, name=self.name) + return CategoricalIndex(result, ordered=True, name=self.name, + categories=ccalendar.DAYS_FULL) DatetimeIndex._add_comparison_methods() diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index 056924f2c6663..05bec15303a74 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -7,7 +7,7 @@ import pandas as pd import pandas.util.testing as tm from pandas import (Index, DatetimeIndex, datetime, offsets, - date_range, Timestamp) + date_range, Timestamp, 
CategoricalIndex) class TestTimeSeries(object): @@ -283,7 +283,9 @@ def test_datetime_name_accessors(self, time_locale): # GH 12805 dti = DatetimeIndex(freq='M', start='2012', end='2013') result = dti.month_name(locale=time_locale) - expected = Index([month.capitalize() for month in expected_months]) + expected = CategoricalIndex( + [month.capitalize() for month in expected_months], + ordered=True, categories=expected_months) tm.assert_index_equal(result, expected) for date, expected in zip(dti, expected_months): result = date.month_name(locale=time_locale) diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index 47798d0ddd7f5..616301276d4c7 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -308,7 +308,8 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): s = Series(DatetimeIndex(freq='M', start='2012', end='2013')) result = s.dt.month_name(locale=time_locale) - expected = Series([month.capitalize() for month in expected_months]) + expected = Series([month.capitalize() for month in expected_months])\ + .astype('category', ordered=True, categories=expected_months) tm.assert_series_equal(result, expected) for s_date, expected in zip(s, expected_months): result = s_date.month_name(locale=time_locale)
- [x] closes #12993 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Work in progress. `DatetimeIndex` `CategoricalIndex` for `weekday_name` but accessing it from `dt.weekday_name` still returns an `object` dtype
https://api.github.com/repos/pandas-dev/pandas/pulls/21177
2018-05-23T04:25:09Z
2018-10-11T01:50:51Z
null
2018-10-11T01:50:51Z
BUG: read_csv with specified kwargs
diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt index 67c7ce150132a..0f2c9c4756987 100644 --- a/doc/source/whatsnew/v0.23.2.txt +++ b/doc/source/whatsnew/v0.23.2.txt @@ -64,6 +64,7 @@ Bug Fixes **I/O** +- Bug in :func:`read_csv` that caused it to incorrectly raise an error when ``nrows=0``, ``low_memory=True``, and ``index_col`` was not ``None`` (:issue:`21141`) - - diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 2c8f98732c92f..65df2bffb4abf 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -3209,12 +3209,22 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None): col = columns[k] if is_integer(k) else k dtype[col] = v - if index_col is None or index_col is False: + # Even though we have no data, the "index" of the empty DataFrame + # could for example still be an empty MultiIndex. Thus, we need to + # check whether we have any index columns specified, via either: + # + # 1) index_col (column indices) + # 2) index_names (column names) + # + # Both must be non-null to ensure a successful construction. Otherwise, + # we have to create a generic emtpy Index. 
+ if (index_col is None or index_col is False) or index_names is None: index = Index([]) else: data = [Series([], dtype=dtype[name]) for name in index_names] index = _ensure_index_from_sequences(data, names=index_names) index_col.sort() + for i, n in enumerate(index_col): columns.pop(n - i) diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index 2b7ff1f5a9879..b39122e5e7906 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -238,6 +238,21 @@ def test_csv_mixed_type(self): out = self.read_csv(StringIO(data)) tm.assert_frame_equal(out, expected) + def test_read_csv_low_memory_no_rows_with_index(self): + if self.engine == "c" and not self.low_memory: + pytest.skip("This is a low-memory specific test") + + # see gh-21141 + data = """A,B,C +1,1,1,2 +2,2,3,4 +3,3,4,5 +""" + out = self.read_csv(StringIO(data), low_memory=True, + index_col=0, nrows=0) + expected = DataFrame(columns=["A", "B", "C"]) + tm.assert_frame_equal(out, expected) + def test_read_csv_dataframe(self): df = self.read_csv(self.csv1, index_col=0, parse_dates=True) df2 = self.read_table(self.csv1, sep=',', index_col=0,
- [+] closes #21141 - [+] tests added / passed - [+] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Solves the issue 21141.
https://api.github.com/repos/pandas-dev/pandas/pulls/21176
2018-05-22T20:36:04Z
2018-06-19T11:26:49Z
2018-06-19T11:26:49Z
2018-06-29T14:58:23Z
Fix nonzero of a SparseArray
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index b5d0532c6dfa3..18edc290768c8 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1437,6 +1437,7 @@ Sparse - Bug in ``DataFrame.groupby`` not including ``fill_value`` in the groups for non-NA ``fill_value`` when grouping by a sparse column (:issue:`5078`) - Bug in unary inversion operator (``~``) on a ``SparseSeries`` with boolean values. The performance of this has also been improved (:issue:`22835`) - Bug in :meth:`SparseArary.unique` not returning the unique values (:issue:`19595`) +- Bug in :meth:`SparseArray.nonzero` and :meth:`SparseDataFrame.dropna` returning shifted/incorrect results (:issue:`21172`) Build Changes ^^^^^^^^^^^^^ diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 0e5a8280cc467..619cd05128ddb 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -784,6 +784,23 @@ def test_fillna_overlap(self): exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64) tm.assert_sp_array_equal(res, exp) + def test_nonzero(self): + # Tests regression #21172. 
+ sa = pd.SparseArray([ + float('nan'), + float('nan'), + 1, 0, 0, + 2, 0, 0, 0, + 3, 0, 0 + ]) + expected = np.array([2, 5, 9], dtype=np.int32) + result, = sa.nonzero() + tm.assert_numpy_array_equal(expected, result) + + sa = pd.SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0]) + result, = sa.nonzero() + tm.assert_numpy_array_equal(expected, result) + class TestSparseArrayAnalytics(object): diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index dd73ec69c3b9a..f802598542cb9 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -1360,3 +1360,16 @@ def test_assign_with_sparse_frame(self): for column in res.columns: assert type(res[column]) is SparseSeries + + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize("how", ["all", "any"]) + def test_dropna(self, inplace, how): + # Tests regression #21172. + expected = pd.SparseDataFrame({"F2": [0, 1]}) + input_df = pd.SparseDataFrame( + {"F1": [float('nan'), float('nan')], "F2": [0, 1]} + ) + result_df = input_df.dropna(axis=1, inplace=inplace, how=how) + if inplace: + result_df = input_df + tm.assert_sp_frame_equal(expected, result_df)
The nonzero operation returned the nonzero locations of the underlying index. However we need to get the nonzero locations in the real array. For this operation to be faster an inverse index structure would be beneficial or it could be implemented using binary search. ```python sa = pd.SparseArray([float('nan'), float('nan'), 1, 0, 0, 2, 0, 0, 0, 3, 0, 0]) ``` returned `0, 3, 7`. The index is shifted by two because of the two first `NaN`s and that's why the `0, 3, 7` are returned. The correct result would be `2, 5, 9` and is found in the method. For the above sample the code works. However for other implementations of `SparseIndex` it could be broken. - [x] closes #21172 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21175
2018-05-22T20:27:37Z
2018-11-17T22:15:58Z
2018-11-17T22:15:57Z
2018-11-17T22:16:02Z
Remove deprecated Slepian test
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index d8e90ae0e1b35..74f2c977e0db2 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -41,7 +41,7 @@ def win_types(request): return request.param -@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian', 'slepian']) +@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian']) def win_types_special(request): return request.param @@ -1079,8 +1079,7 @@ def test_cmov_window_special(self, win_types_special): kwds = { 'kaiser': {'beta': 1.}, 'gaussian': {'std': 1.}, - 'general_gaussian': {'power': 2., 'width': 2.}, - 'slepian': {'width': 0.5}} + 'general_gaussian': {'power': 2., 'width': 2.}} vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) @@ -1090,8 +1089,6 @@ def test_cmov_window_special(self, win_types_special): 13.65671, 12.01002, np.nan, np.nan], 'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589, 11.73161, 13.08516, 12.95111, 12.74577, np.nan, np.nan], - 'slepian': [np.nan, np.nan, 9.81073, 10.89359, 11.70284, 12.88331, - 12.96079, 12.77008, np.nan, np.nan], 'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161, 12.75129, 12.90702, 12.83757, np.nan, np.nan] }
Partially addresses #21137.
https://api.github.com/repos/pandas-dev/pandas/pulls/21173
2018-05-22T16:22:41Z
2018-05-23T10:35:43Z
2018-05-23T10:35:43Z
2018-05-23T17:26:16Z
BUG: group with multiple named results
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index df7a5dc9dc173..6cd2a91e9c17d 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2298,7 +2298,28 @@ def levels(self): @property def names(self): - return [ping.name for ping in self.groupings] + # add suffix to level name in case they contain duplicates (GH 19029): + orig_names = [ping.name for ping in self.groupings] + # if no names were assigned return the original names + if all(x is None for x in orig_names): + return orig_names + + suffixes = collections.defaultdict(int) + dups = {n: count for n, count in + collections.Counter(orig_names).items() if count > 1} + new_names = [] + for name in orig_names: + if name not in dups: + new_names.append(name) + else: + if name is not None: + new_name = '{0}_{1}'.format(name, suffixes[name]) + else: + new_name = '{0}'.format(suffixes[name]) + suffixes[name] += 1 + new_names.append(new_name) + + return new_names def size(self): """ diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index e0793b8e1bd64..fc3f2b1b7c4b7 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -558,10 +558,6 @@ def test_as_index(): result = df.groupby(['cat', s], as_index=False, observed=True).sum() tm.assert_frame_equal(result, expected) - # GH18872: conflicting names in desired index - with pytest.raises(ValueError): - df.groupby(['cat', s.rename('cat')], observed=True).sum() - # is original index dropped? 
group_columns = ['cat', 'A'] expected = DataFrame( diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index e05f9de5ea7f4..a583c1230bfa4 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1674,3 +1674,44 @@ def test_tuple_correct_keyerror(): [3, 4]])) with tm.assert_raises_regex(KeyError, "(7, 8)"): df.groupby((7, 8)).mean() + + +def test_dup_index_names(): + # dup. index names in groupby operations should be renamed (GH 19029): + df = pd.DataFrame({'date': pd.date_range('5.1.2018', '5.3.2018'), + 'vals': list(range(3))}) + + # duplicates get suffixed by integer position + mi = pd.MultiIndex.from_product([[5], [1, 2, 3]], + names=['date_0', 'date_1']) + expected = pd.Series(data=list(range(3)), index=mi, name='vals') + result = df.groupby([df.date.dt.month, df.date.dt.day])['vals'].sum() + + tm.assert_series_equal(result, expected) + + # 2 out of 3 are duplicates and None + mi = pd.MultiIndex.from_product([[2018], [5], [1, 2, 3]], + names=['0', '1', 'date']) + expected = pd.Series(data=list(range(3)), index=mi, name='vals') + result = df.groupby([df.date.dt.year.rename(None), + df.date.dt.month.rename(None), + df.date.dt.day])['vals'].sum() + tm.assert_series_equal(result, expected) + + # 2 out of 3 names (not None) are duplicates, the remaining is None + mi = pd.MultiIndex.from_product([[2018], [5], [1, 2, 3]], + names=['date_0', None, 'date_1']) + expected = pd.Series(data=list(range(3)), index=mi, name='vals') + result = df.groupby([df.date.dt.year, + df.date.dt.month.rename(None), + df.date.dt.day])['vals'].sum() + tm.assert_series_equal(result, expected) + + # all are None + mi = pd.MultiIndex.from_product([[2018], [5], [1, 2, 3]], + names=[None, None, None]) + expected = pd.Series(data=list(range(3)), index=mi, name='vals') + result = df.groupby([df.date.dt.year.rename(None), + df.date.dt.month.rename(None), + df.date.dt.day.rename(None)])['vals'].sum() + 
tm.assert_series_equal(result, expected) diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index d2cf3fc11e165..3e416e6fed161 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1705,9 +1705,21 @@ def test_crosstab_with_numpy_size(self): tm.assert_frame_equal(result, expected) def test_crosstab_dup_index_names(self): - # GH 13279, GH 18872 + # duplicated index name should get renamed (GH 19029) s = pd.Series(range(3), name='foo') - pytest.raises(ValueError, pd.crosstab, s, s) + failed = False + try: + result = pd.crosstab(s, s) + except ValueError: + failed = True + + assert failed is False + + s0 = pd.Series(range(3), name='foo0') + s1 = pd.Series(range(3), name='foo1') + expected = pd.DataFrame(np.diag(np.ones(3, dtype='int64')), + index=s0, columns=s1) + tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("names", [['a', ('b', 'c')], [('a', 'b'), 'c']])
- [x] closes #19029 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This bugfix gets rid of duplicated names that can be the result of groupby operations (#19029). I opted to implement one of the ideas proposed by [toobaz](https://github.com/pandas-dev/pandas/issues/19029#issuecomment-354684340): duplicated names get suffixed by their corresponding position, i.e. ['name','name'] gets transformed into ['name0', 'name1']. * A few testcases have been added. * One particular testcase had to be changed ([test_crosstab_dup_index_names](https://github.com/pandas-dev/pandas/blob/master/pandas/tests/reshape/test_pivot.py)) - This is because with the new bugfix crosstab does not yield a ValueError anymore.
https://api.github.com/repos/pandas-dev/pandas/pulls/21171
2018-05-22T11:53:57Z
2018-11-01T01:38:19Z
null
2018-11-01T01:38:19Z
Small typo in deprecation message added in PR #21060
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 4c237da7b6d0e..e2b0b33053f83 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1246,7 +1246,7 @@ class Timedelta(_Timedelta): deprecated. Use 'array // timedelta.value' instead. If you want to obtain epochs from an array of timestamps, you can rather use - 'array - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")'. + '(array - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")'. """) warnings.warn(msg, FutureWarning) return other // self.value
https://api.github.com/repos/pandas-dev/pandas/pulls/21170
2018-05-22T10:31:47Z
2018-05-22T18:32:37Z
2018-05-22T18:32:37Z
2018-05-22T18:32:58Z
BUG: Enable stata files to be written to buffers
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index a071d7f3f5534..a7ba0dfbbd1c4 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -83,7 +83,7 @@ Indexing I/O ^^^ -- +- Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`) - Plotting diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0a07e85401638..1d8f225bd4342 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1774,8 +1774,11 @@ def to_stata(self, fname, convert_dates=None, write_index=True, Parameters ---------- - fname : str or buffer - String path of file-like object. + fname : path (string), buffer or path object + string, path object (pathlib.Path or py._path.local.LocalPath) or + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + data has been written. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. 
Options are 'tc', diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 8f91c7a497e2d..2797924985c70 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1758,11 +1758,25 @@ def value_labels(self): return self.value_label_dict -def _open_file_binary_write(fname, encoding): +def _open_file_binary_write(fname): + """ + Open a binary file or no-op if file-like + + Parameters + ---------- + fname : string path, path object or buffer + + Returns + ------- + file : file-like object + File object supporting write + own : bool + True if the file was created, otherwise False + """ if hasattr(fname, 'write'): # if 'b' not in fname.mode: - return fname - return open(fname, "wb") + return fname, False + return open(fname, "wb"), True def _set_endianness(endianness): @@ -1899,7 +1913,9 @@ class StataWriter(StataParser): ---------- fname : path (string), buffer or path object string, path object (pathlib.Path or py._path.local.LocalPath) or - object implementing a binary write() functions. + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + is written. .. versionadded:: 0.23.0 support for pathlib, py.path. 
@@ -1970,6 +1986,7 @@ def __init__(self, fname, data, convert_dates=None, write_index=True, self._time_stamp = time_stamp self._data_label = data_label self._variable_labels = variable_labels + self._own_file = True # attach nobs, nvars, data, varlist, typlist self._prepare_pandas(data) @@ -2183,9 +2200,7 @@ def _prepare_pandas(self, data): self.fmtlist[key] = self._convert_dates[key] def write_file(self): - self._file = _open_file_binary_write( - self._fname, self._encoding or self._default_encoding - ) + self._file, self._own_file = _open_file_binary_write(self._fname) try: self._write_header(time_stamp=self._time_stamp, data_label=self._data_label) @@ -2205,6 +2220,23 @@ def write_file(self): self._write_file_close_tag() self._write_map() finally: + self._close() + + def _close(self): + """ + Close the file if it was created by the writer. + + If a buffer or file-like object was passed in, for example a GzipFile, + then leave this file open for the caller to close. In either case, + attempt to flush the file contents to ensure they are written to disk + (if supported) + """ + # Some file-like objects might not support flush + try: + self._file.flush() + except AttributeError: + pass + if self._own_file: self._file.close() def _write_map(self): @@ -2374,7 +2406,7 @@ def _prepare_data(self): def _write_data(self): data = self.data - data.tofile(self._file) + self._file.write(data.tobytes()) def _null_terminate(self, s, as_string=False): null_byte = '\x00' @@ -2641,7 +2673,9 @@ class StataWriter117(StataWriter): ---------- fname : path (string), buffer or path object string, path object (pathlib.Path or py._path.local.LocalPath) or - object implementing a binary write() functions. + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + is written. 
data : DataFrame Input to save convert_dates : dict @@ -2879,7 +2913,7 @@ def _write_data(self): self._update_map('data') data = self.data self._file.write(b'<data>') - data.tofile(self._file) + self._file.write(data.tobytes()) self._file.write(b'</data>') def _write_strls(self): diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 110b790a65037..f3a465da4e87f 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -2,6 +2,8 @@ # pylint: disable=E1101 import datetime as dt +import io +import gzip import os import struct import warnings @@ -1473,3 +1475,28 @@ def test_invalid_date_conversion(self): with pytest.raises(ValueError): original.to_stata(path, convert_dates={'wrong_name': 'tc'}) + + @pytest.mark.parametrize('version', [114, 117]) + def test_nonfile_writing(self, version): + # GH 21041 + bio = io.BytesIO() + df = tm.makeDataFrame() + df.index.name = 'index' + with tm.ensure_clean() as path: + df.to_stata(bio, version=version) + bio.seek(0) + with open(path, 'wb') as dta: + dta.write(bio.read()) + reread = pd.read_stata(path, index_col='index') + tm.assert_frame_equal(df, reread) + + def test_gzip_writing(self): + # writing version 117 requires seek and cannot be used with gzip + df = tm.makeDataFrame() + df.index.name = 'index' + with tm.ensure_clean() as path: + with gzip.GzipFile(path, 'wb') as gz: + df.to_stata(gz, version=114) + with gzip.GzipFile(path, 'rb') as gz: + reread = pd.read_stata(gz, index_col='index') + tm.assert_frame_equal(df, reread)
Enable support for general file-like objects when exporting stata files - [x] closes #21041 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21169
2018-05-22T09:02:36Z
2018-05-24T22:11:46Z
2018-05-24T22:11:46Z
2018-06-12T14:46:44Z
DOC: move mention of #21115 from bugs to enhancements
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index f204fce3a525f..44f7280d5535f 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -15,6 +15,8 @@ and bug fixes. We recommend that all users upgrade to this version. New features ~~~~~~~~~~~~ +- :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with MultiIndex (:issue:`21115`) + .. _whatsnew_0231.deprecations: @@ -75,7 +77,6 @@ Indexing ^^^^^^^^ - Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`) -- :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with MultiIndex (:issue:`21115`) - I/O
Very trivial followup to https://github.com/pandas-dev/pandas/pull/21116#discussion_r189738781
https://api.github.com/repos/pandas-dev/pandas/pulls/21165
2018-05-22T05:20:42Z
2018-05-22T10:19:52Z
2018-05-22T10:19:52Z
2018-07-08T08:31:43Z
BUG: Fix nested_to_record with None values in nested levels
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 974527624a312..e29cb0a5a2626 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -97,6 +97,7 @@ I/O - Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`) - Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`) +- Bug when :meth:`pandas.io.json.json_normalize` was called with ``None`` values in nested levels in JSON (:issue:`21158`) - Bug in :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` causes encoding error when compression and encoding are specified (:issue:`21241`, :issue:`21118`) - Bug in :meth:`read_stata` and :class:`StataReader` which did not correctly decode utf-8 strings on Python 3 from Stata 14 files (dta version 118) (:issue:`21244`) - diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index 549204abd3caf..17393d458e746 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -80,7 +80,7 @@ def nested_to_record(ds, prefix="", sep=".", level=0): if level != 0: # so we skip copying for top level, common case v = new_d.pop(k) new_d[newkey] = v - if v is None: # pop the key if the value is None + elif v is None: # pop the key if the value is None new_d.pop(k) continue else: diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 0fabaf747b6de..dc34ba81f679d 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -375,3 +375,59 @@ def test_nonetype_dropping(self): 'info.last_updated': '26/05/2012'}] assert result == expected + + def test_nonetype_top_level_bottom_level(self): + # GH21158: If inner level json has a key with a null value + # make sure it doesnt do a new_d.pop twice and except + data = { + "id": None, + "location": { + "country": { + "state": { + "id": 
None, + "town.info": { + "id": None, + "region": None, + "x": 49.151580810546875, + "y": -33.148521423339844, + "z": 27.572303771972656}}} + } + } + result = nested_to_record(data) + expected = { + 'location.country.state.id': None, + 'location.country.state.town.info.id': None, + 'location.country.state.town.info.region': None, + 'location.country.state.town.info.x': 49.151580810546875, + 'location.country.state.town.info.y': -33.148521423339844, + 'location.country.state.town.info.z': 27.572303771972656} + assert result == expected + + def test_nonetype_multiple_levels(self): + # GH21158: If inner level json has a key with a null value + # make sure it doesnt do a new_d.pop twice and except + data = { + "id": None, + "location": { + "id": None, + "country": { + "id": None, + "state": { + "id": None, + "town.info": { + "region": None, + "x": 49.151580810546875, + "y": -33.148521423339844, + "z": 27.572303771972656}}} + } + } + result = nested_to_record(data) + expected = { + 'location.id': None, + 'location.country.id': None, + 'location.country.state.id': None, + 'location.country.state.town.info.region': None, + 'location.country.state.town.info.x': 49.151580810546875, + 'location.country.state.town.info.y': -33.148521423339844, + 'location.country.state.town.info.z': 27.572303771972656} + assert result == expected
- [ ] closes #21158 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Continue after pop so you dont pop with the same key twice in a row
https://api.github.com/repos/pandas-dev/pandas/pulls/21164
2018-05-22T04:39:53Z
2018-06-07T15:58:48Z
2018-06-07T15:58:48Z
2018-06-12T16:30:33Z
Cleanup clipboard tests
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 98c0effabec84..80fddd50fc9a8 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -9,10 +9,11 @@ from pandas import DataFrame from pandas import read_clipboard from pandas import get_option +from pandas.compat import PY2 from pandas.util import testing as tm from pandas.util.testing import makeCustomDataframe as mkdf from pandas.io.clipboard.exceptions import PyperclipException -from pandas.io.clipboard import clipboard_set +from pandas.io.clipboard import clipboard_set, clipboard_get try: @@ -22,73 +23,134 @@ _DEPS_INSTALLED = 0 +def build_kwargs(sep, excel): + kwargs = {} + if excel != 'default': + kwargs['excel'] = excel + if sep != 'default': + kwargs['sep'] = sep + return kwargs + + +@pytest.fixture(params=['delims', 'utf8', 'string', 'long', 'nonascii', + 'colwidth', 'mixed', 'float', 'int']) +def df(request): + data_type = request.param + + if data_type == 'delims': + return pd.DataFrame({'a': ['"a,\t"b|c', 'd\tef´'], + 'b': ['hi\'j', 'k\'\'lm']}) + elif data_type == 'utf8': + return pd.DataFrame({'a': ['µasd', 'Ωœ∑´'], + 'b': ['øπ∆˚¬', 'œ∑´®']}) + elif data_type == 'string': + return mkdf(5, 3, c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'long': + max_rows = get_option('display.max_rows') + return mkdf(max_rows + 1, 3, + data_gen_f=lambda *args: randint(2), + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'nonascii': + return pd.DataFrame({'en': 'in English'.split(), + 'es': 'en español'.split()}) + elif data_type == 'colwidth': + _cw = get_option('display.max_colwidth') + 1 + return mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw, + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'mixed': + return DataFrame({'a': np.arange(1.0, 6.0) + 0.01, + 'b': np.arange(1, 6), + 'c': list('abcde')}) + elif 
data_type == 'float': + return mkdf(5, 3, data_gen_f=lambda r, c: float(r) + 0.01, + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + elif data_type == 'int': + return mkdf(5, 3, data_gen_f=lambda *args: randint(2), + c_idx_type='s', r_idx_type='i', + c_idx_names=[None], r_idx_names=[None]) + else: + raise ValueError + + @pytest.mark.single @pytest.mark.skipif(not _DEPS_INSTALLED, reason="clipboard primitives not installed") class TestClipboard(object): - - @classmethod - def setup_class(cls): - cls.data = {} - cls.data['string'] = mkdf(5, 3, c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - cls.data['int'] = mkdf(5, 3, data_gen_f=lambda *args: randint(2), - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - cls.data['float'] = mkdf(5, 3, - data_gen_f=lambda r, c: float(r) + 0.01, - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - cls.data['mixed'] = DataFrame({'a': np.arange(1.0, 6.0) + 0.01, - 'b': np.arange(1, 6), - 'c': list('abcde')}) - - # Test columns exceeding "max_colwidth" (GH8305) - _cw = get_option('display.max_colwidth') + 1 - cls.data['colwidth'] = mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw, - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - # Test GH-5346 - max_rows = get_option('display.max_rows') - cls.data['longdf'] = mkdf(max_rows + 1, 3, - data_gen_f=lambda *args: randint(2), - c_idx_type='s', r_idx_type='i', - c_idx_names=[None], r_idx_names=[None]) - # Test for non-ascii text: GH9263 - cls.data['nonascii'] = pd.DataFrame({'en': 'in English'.split(), - 'es': 'en español'.split()}) - # unicode round trip test for GH 13747, GH 12529 - cls.data['utf8'] = pd.DataFrame({'a': ['µasd', 'Ωœ∑´'], - 'b': ['øπ∆˚¬', 'œ∑´®']}) - cls.data_types = list(cls.data.keys()) - - @classmethod - def teardown_class(cls): - del cls.data_types, cls.data - - def check_round_trip_frame(self, data_type, excel=None, sep=None, + def 
check_round_trip_frame(self, data, excel=None, sep=None, encoding=None): - data = self.data[data_type] data.to_clipboard(excel=excel, sep=sep, encoding=encoding) - if sep is not None: - result = read_clipboard(sep=sep, index_col=0, encoding=encoding) - else: - result = read_clipboard(encoding=encoding) + result = read_clipboard(sep=sep or '\t', index_col=0, + encoding=encoding) tm.assert_frame_equal(data, result, check_dtype=False) - def test_round_trip_frame_sep(self): - for dt in self.data_types: - self.check_round_trip_frame(dt, sep=',') - self.check_round_trip_frame(dt, sep=r'\s+') - self.check_round_trip_frame(dt, sep='|') - - def test_round_trip_frame_string(self): - for dt in self.data_types: - self.check_round_trip_frame(dt, excel=False) - - def test_round_trip_frame(self): - for dt in self.data_types: - self.check_round_trip_frame(dt) + # Test that default arguments copy as tab delimited + @pytest.mark.xfail(reason='to_clipboard defaults to space delim. ' + 'Issue in #21104, Fixed in #21111') + def test_round_trip_frame(self, df): + self.check_round_trip_frame(df) + + # Test that explicit delimiters are respected + @pytest.mark.parametrize('sep', ['\t', ',', '|']) + def test_round_trip_frame_sep(self, df, sep): + self.check_round_trip_frame(df, sep=sep) + + # Test white space separator + @pytest.mark.xfail(reason="Fails on 'delims' df because quote escapes " + "aren't handled correctly in default c engine. Fixed " + "in #21111 by defaulting to python engine for " + "whitespace separator") + def test_round_trip_frame_string(self, df): + df.to_clipboard(excel=False, sep=None) + result = read_clipboard() + assert df.to_string() == result.to_string() + assert df.shape == result.shape + + # Two character separator is not supported in to_clipboard + # Test that multi-character separators are not silently passed + @pytest.mark.xfail(reason="Not yet implemented. 
Fixed in #21111") + def test_excel_sep_warning(self, df): + with tm.assert_produces_warning(): + df.to_clipboard(excel=True, sep=r'\t') + + # Separator is ignored when excel=False and should produce a warning + @pytest.mark.xfail(reason="Not yet implemented. Fixed in #21111") + def test_copy_delim_warning(self, df): + with tm.assert_produces_warning(): + df.to_clipboard(excel=False, sep='\t') + + # Tests that the default behavior of to_clipboard is tab + # delimited and excel="True" + @pytest.mark.xfail(reason="to_clipboard defaults to space delim. Issue in " + "#21104, Fixed in #21111") + @pytest.mark.parametrize('sep', ['\t', None, 'default']) + @pytest.mark.parametrize('excel', [True, None, 'default']) + def test_clipboard_copy_tabs_default(self, sep, excel, df): + kwargs = build_kwargs(sep, excel) + df.to_clipboard(**kwargs) + if PY2: + # to_clipboard copies unicode, to_csv produces bytes. This is + # expected behavior + assert clipboard_get().encode('utf-8') == df.to_csv(sep='\t') + else: + assert clipboard_get() == df.to_csv(sep='\t') + + # Tests reading of white space separated tables + @pytest.mark.xfail(reason="Fails on 'delims' df because quote escapes " + "aren't handled correctly. in default c engine. 
Fixed " + "in #21111 by defaulting to python engine for " + "whitespace separator") + @pytest.mark.parametrize('sep', [None, 'default']) + @pytest.mark.parametrize('excel', [False]) + def test_clipboard_copy_strings(self, sep, excel, df): + kwargs = build_kwargs(sep, excel) + df.to_clipboard(**kwargs) + result = read_clipboard(sep=r'\s+') + assert result.to_string() == df.to_string() + assert df.shape == result.shape def test_read_clipboard_infer_excel(self): # gh-19010: avoid warnings @@ -124,15 +186,15 @@ def test_read_clipboard_infer_excel(self): tm.assert_frame_equal(res, exp) - def test_invalid_encoding(self): + def test_invalid_encoding(self, df): # test case for testing invalid encoding - data = self.data['string'] with pytest.raises(ValueError): - data.to_clipboard(encoding='ascii') + df.to_clipboard(encoding='ascii') with pytest.raises(NotImplementedError): pd.read_clipboard(encoding='ascii') - def test_round_trip_valid_encodings(self): - for enc in ['UTF-8', 'utf-8', 'utf8']: - for dt in self.data_types: - self.check_round_trip_frame(dt, encoding=enc) + @pytest.mark.xfail(reason='to_clipboard defaults to space delim. ' + 'Issue in #21104, Fixed in #21111') + @pytest.mark.parametrize('enc', ['UTF-8', 'utf-8', 'utf8']) + def test_round_trip_valid_encodings(self, enc, df): + self.check_round_trip_frame(df, encoding=enc)
As requested in #21111, I've refactored the original clipboard tests as well as adding my own. The set of tests have been improved to include testing of: - failure of tab delimited tables (#21104) - errors relating to copying and reading space-delimited tables - copy and pasting tables where the text contains quotes or delimiters As a result, these tests will fail on the current master branch, but should succeed on the commit in #21111 @WillAyd
https://api.github.com/repos/pandas-dev/pandas/pulls/21163
2018-05-22T02:52:55Z
2018-06-26T22:19:42Z
2018-06-26T22:19:42Z
2018-07-02T15:48:18Z
BUG: Fix interval_range when start/periods or end/periods are specified with float start/end
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 44f7280d5535f..a071d7f3f5534 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -77,6 +77,7 @@ Indexing ^^^^^^^^ - Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`) +- Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`) - I/O diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 408a8cc435b63..8f8d8760583ce 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1572,6 +1572,10 @@ def interval_range(start=None, end=None, periods=None, freq=None, periods += 1 if is_number(endpoint): + # force consistency between start/end/freq (lower end if freq skips it) + if com._all_not_none(start, end, freq): + end -= (end - start) % freq + # compute the period/start/end if unspecified (at most one) if periods is None: periods = int((end - start) // freq) + 1 @@ -1580,10 +1584,6 @@ def interval_range(start=None, end=None, periods=None, freq=None, elif end is None: end = start + (periods - 1) * freq - # force end to be consistent with freq (lower if freq skips end) - if freq is not None: - end -= end % freq - breaks = np.linspace(start, end, periods) if all(is_integer(x) for x in com._not_none(start, end, freq)): # np.linspace always produces float output diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py index 0fadfcf0c7f28..29fe2b0185662 100644 --- a/pandas/tests/indexes/interval/test_interval_range.py +++ b/pandas/tests/indexes/interval/test_interval_range.py @@ -110,6 +110,8 @@ def test_constructor_timedelta(self, closed, name, freq, periods): @pytest.mark.parametrize('start, end, freq, expected_endpoint', [ (0, 10, 3, 9), + (0, 10, 1.5, 9), + (0.5, 10, 3, 9.5), 
(Timedelta('0D'), Timedelta('10D'), '2D4H', Timedelta('8D16H')), (Timestamp('2018-01-01'), Timestamp('2018-02-09'), @@ -125,6 +127,22 @@ def test_early_truncation(self, start, end, freq, expected_endpoint): result_endpoint = result.right[-1] assert result_endpoint == expected_endpoint + @pytest.mark.parametrize('start, end, freq', [ + (0.5, None, None), + (None, 4.5, None), + (0.5, None, 1.5), + (None, 6.5, 1.5)]) + def test_no_invalid_float_truncation(self, start, end, freq): + # GH 21161 + if freq is None: + breaks = [0.5, 1.5, 2.5, 3.5, 4.5] + else: + breaks = [0.5, 2.0, 3.5, 5.0, 6.5] + expected = IntervalIndex.from_breaks(breaks) + + result = interval_range(start=start, end=end, periods=4, freq=freq) + tm.assert_index_equal(result, expected) + @pytest.mark.parametrize('start, mid, end', [ (Timestamp('2018-03-10', tz='US/Eastern'), Timestamp('2018-03-10 23:30:00', tz='US/Eastern'),
- [X] closes #21161 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Should be good for 0.23.1; the code changes only hit the `interval_range` function, which is outside the `IntervalIndex` class, so should be independent of any enhancements related to `IntervalIndex` that we might want to push to 0.24.0.
https://api.github.com/repos/pandas-dev/pandas/pulls/21162
2018-05-22T00:42:16Z
2018-05-23T04:22:15Z
2018-05-23T04:22:14Z
2018-06-08T17:14:49Z
ENH: Integer NA Extension Array
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index a0076118a28a7..4d5c34b883d18 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -13,6 +13,7 @@ v0.24.0 (Month XX, 2018) New features ~~~~~~~~~~~~ + - ``ExcelWriter`` now accepts ``mode`` as a keyword argument, enabling append to existing workbooks when using the ``openpyxl`` engine (:issue:`3441`) .. _whatsnew_0240.enhancements.extension_array_operators: @@ -31,6 +32,62 @@ See the :ref:`ExtensionArray Operator Support <extending.extension.operator>` documentation section for details on both ways of adding operator support. +.. _whatsnew_0240.enhancements.intna: + +Optional Integer NA Support +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pandas has gained the ability to hold integer dtypes with missing values. This long requested feature is enabled through the use of :ref:`extension types <extending.extension-types>`. +Here is an example of the usage. + +We can construct a ``Series`` with the specified dtype. The dtype string ``Int64`` is a pandas ``ExtensionDtype``. Specifying a list or array using the traditional missing value +marker of ``np.nan`` will infer to integer dtype. The display of the ``Series`` will also use the ``NaN`` to indicate missing values in string outputs. (:issue:`20700`, :issue:`20747`) + +.. ipython:: python + + s = pd.Series([1, 2, np.nan], dtype='Int64') + s + + +Operations on these dtypes will propagate ``NaN`` as other pandas operations. + +.. ipython:: python + + # arithmetic + s + 1 + + # comparison + s == 1 + + # indexing + s.iloc[1:3] + + # operate with other dtypes + s + s.iloc[1:3].astype('Int8') + + # coerce when needed + s + 0.01 + +These dtypes can operate as part of of ``DataFrame``. + +.. ipython:: python + + df = pd.DataFrame({'A': s, 'B': [1, 1, 3], 'C': list('aab')}) + df + df.dtypes + + +These dtypes can be merged & reshaped & casted. + +.. 
ipython:: python + + pd.concat([df[['A']], df[['B', 'C']]], axis=1).dtypes + df['A'].astype(float) + +.. warning:: + + The Integer NA support currently uses the captilized dtype version, e.g. ``Int8`` as compared to the traditional ``int8``. This may be changed at a future date. + .. _whatsnew_0240.enhancements.read_html: ``read_html`` Enhancements @@ -256,6 +313,7 @@ Previous Behavior: ExtensionType Changes ^^^^^^^^^^^^^^^^^^^^^ +- ``ExtensionArray`` has gained the abstract methods ``.dropna()`` (:issue:`21185`) - ``ExtensionDtype`` has gained the ability to instantiate from string dtypes, e.g. ``decimal`` would instantiate a registered ``DecimalDtype``; furthermore the ``ExtensionDtype`` has gained the method ``construct_array_type`` (:issue:`21185`) - The ``ExtensionArray`` constructor, ``_from_sequence`` now take the keyword arg ``copy=False`` (:issue:`21185`) diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py index 83a781cdd38fd..9132c74091410 100644 --- a/pandas/core/arrays/__init__.py +++ b/pandas/core/arrays/__init__.py @@ -1,7 +1,10 @@ from .base import (ExtensionArray, # noqa + ExtensionOpsMixin, ExtensionScalarOpsMixin) from .categorical import Categorical # noqa from .datetimes import DatetimeArrayMixin # noqa from .interval import IntervalArray # noqa from .period import PeriodArrayMixin # noqa from .timedeltas import TimedeltaArrayMixin # noqa +from .integer import ( # noqa + IntegerArray, to_integer_array) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index fe4e461b0bd4f..01ed085dd2b9f 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -12,8 +12,8 @@ from pandas.errors import AbstractMethodError from pandas.compat.numpy import function as nv from pandas.compat import set_function_name, PY3 -from pandas.core.dtypes.common import is_list_like from pandas.core import ops +from pandas.core.dtypes.common import is_list_like _not_implemented_message = "{} does not implement {}." 
@@ -88,7 +88,7 @@ class ExtensionArray(object): # Constructors # ------------------------------------------------------------------------ @classmethod - def _from_sequence(cls, scalars, copy=False): + def _from_sequence(cls, scalars, dtype=None, copy=False): """Construct a new ExtensionArray from a sequence of scalars. Parameters @@ -96,8 +96,11 @@ def _from_sequence(cls, scalars, copy=False): scalars : Sequence Each element will be an instance of the scalar type for this array, ``cls.dtype.type``. + dtype : dtype, optional + Construct for this particular dtype. This should be a Dtype + compatible with the ExtensionArray. copy : boolean, default False - if True, copy the underlying data + If True, copy the underlying data. Returns ------- ExtensionArray @@ -378,7 +381,7 @@ def fillna(self, value=None, method=None, limit=None): func = pad_1d if method == 'pad' else backfill_1d new_values = func(self.astype(object), limit=limit, mask=mask) - new_values = self._from_sequence(new_values) + new_values = self._from_sequence(new_values, dtype=self.dtype) else: # fill with value new_values = self.copy() @@ -407,7 +410,7 @@ def unique(self): from pandas import unique uniques = unique(self.astype(object)) - return self._from_sequence(uniques) + return self._from_sequence(uniques, dtype=self.dtype) def _values_for_factorize(self): # type: () -> Tuple[ndarray, Any] @@ -559,7 +562,7 @@ def take(self, indices, allow_fill=False, fill_value=None): result = take(data, indices, fill_value=fill_value, allow_fill=allow_fill) - return self._from_sequence(result) + return self._from_sequence(result, dtype=self.dtype) """ # Implementer note: The `fill_value` parameter should be a user-facing # value, an instance of self.dtype.type. 
When passed `fill_value=None`, diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 973a8af76bb07..0d73b2c60d76d 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -488,8 +488,8 @@ def _constructor(self): return Categorical @classmethod - def _from_sequence(cls, scalars): - return Categorical(scalars) + def _from_sequence(cls, scalars, dtype=None, copy=False): + return Categorical(scalars, dtype=dtype) def copy(self): """ Copy constructor. """ diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py new file mode 100644 index 0000000000000..c126117060c3d --- /dev/null +++ b/pandas/core/arrays/integer.py @@ -0,0 +1,599 @@ +import sys +import warnings +import copy +import numpy as np + +from pandas._libs.lib import infer_dtype +from pandas.util._decorators import cache_readonly +from pandas.compat import u, range +from pandas.compat import set_function_name + +from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass +from pandas.core.dtypes.common import ( + is_integer, is_scalar, is_float, + is_float_dtype, + is_integer_dtype, + is_object_dtype, + is_list_like) +from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.dtypes import registry +from pandas.core.dtypes.missing import isna, notna + +from pandas.io.formats.printing import ( + format_object_summary, format_object_attrs, default_pprint) + + +class _IntegerDtype(ExtensionDtype): + """ + An ExtensionDtype to hold a single size & kind of integer dtype. + + These specific implementations are subclasses of the non-public + _IntegerDtype. For example we have Int8Dtype to represnt signed int 8s. + + The attributes name & type are set when these subclasses are created. 
+ """ + name = None + type = None + na_value = np.nan + + @cache_readonly + def is_signed_integer(self): + return self.kind == 'i' + + @cache_readonly + def is_unsigned_integer(self): + return self.kind == 'u' + + @cache_readonly + def numpy_dtype(self): + """ Return an instance of our numpy dtype """ + return np.dtype(self.type) + + @cache_readonly + def kind(self): + return self.numpy_dtype.kind + + @classmethod + def construct_array_type(cls): + """Return the array type associated with this dtype + + Returns + ------- + type + """ + return IntegerArray + + @classmethod + def construct_from_string(cls, string): + """ + Construction from a string, raise a TypeError if not + possible + """ + if string == cls.name: + return cls() + raise TypeError("Cannot construct a '{}' from " + "'{}'".format(cls, string)) + + +def to_integer_array(values, dtype=None): + """ + Infer and return an integer array of the values. + + Parameters + ---------- + values : 1D list-like + dtype : dtype, optional + dtype to coerce + + Returns + ------- + IntegerArray + + Raises + ------ + TypeError if incompatible types + """ + return IntegerArray(values, dtype=dtype, copy=False) + + +def safe_cast(values, dtype, copy): + """ + Safely cast the values to the dtype if they + are equivalent, meaning floats must be equivalent to the + ints. 
+ + """ + + try: + return values.astype(dtype, casting='safe', copy=copy) + except TypeError: + + casted = values.astype(dtype, copy=copy) + if (casted == values).all(): + return casted + + raise TypeError("cannot safely cast non-equivalent {} to {}".format( + values.dtype, np.dtype(dtype))) + + +def coerce_to_array(values, dtype, mask=None, copy=False): + """ + Coerce the input values array to numpy arrays with a mask + + Parameters + ---------- + values : 1D list-like + dtype : integer dtype + mask : boolean 1D array, optional + copy : boolean, default False + if True, copy the input + + Returns + ------- + tuple of (values, mask) + """ + if dtype is not None: + if not issubclass(type(dtype), _IntegerDtype): + try: + dtype = _dtypes[str(np.dtype(dtype))] + except KeyError: + raise ValueError("invalid dtype specified {}".format(dtype)) + + if isinstance(values, IntegerArray): + values, mask = values._data, values._mask + if dtype is not None: + values = values.astype(dtype.numpy_dtype, copy=False) + + if copy: + values = values.copy() + mask = mask.copy() + return values, mask + + values = np.array(values, copy=copy) + if is_object_dtype(values): + inferred_type = infer_dtype(values) + if inferred_type not in ['floating', 'integer', + 'mixed-integer', 'mixed-integer-float']: + raise TypeError("{} cannot be converted to an IntegerDtype".format( + values.dtype)) + + elif not (is_integer_dtype(values) or is_float_dtype(values)): + raise TypeError("{} cannot be converted to an IntegerDtype".format( + values.dtype)) + + if mask is None: + mask = isna(values) + else: + assert len(mask) == len(values) + + if not values.ndim == 1: + raise TypeError("values must be a 1D list-like") + if not mask.ndim == 1: + raise TypeError("mask must be a 1D list-like") + + # infer dtype if needed + if dtype is None: + if is_integer_dtype(values): + dtype = values.dtype + else: + dtype = np.dtype('int64') + else: + dtype = dtype.type + + # if we are float, let's make sure that we can + # 
safely cast + + # we copy as need to coerce here + if mask.any(): + values = values.copy() + values[mask] = 1 + values = safe_cast(values, dtype, copy=False) + else: + values = safe_cast(values, dtype, copy=False) + + return values, mask + + +class IntegerArray(ExtensionArray, ExtensionOpsMixin): + """ + We represent an IntegerArray with 2 numpy arrays + - data: contains a numpy integer array of the appropriate dtype + - mask: a boolean array holding a mask on the data, False is missing + """ + + @cache_readonly + def dtype(self): + return _dtypes[str(self._data.dtype)] + + def __init__(self, values, mask=None, dtype=None, copy=False): + """ + Parameters + ---------- + values : 1D list-like / IntegerArray + mask : 1D list-like, optional + dtype : subclass of _IntegerDtype, optional + copy : bool, default False + + Returns + ------- + IntegerArray + """ + self._data, self._mask = coerce_to_array( + values, dtype=dtype, mask=mask, copy=copy) + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + return cls(scalars, dtype=dtype, copy=copy) + + @classmethod + def _from_factorized(cls, values, original): + return cls(values, dtype=original.dtype) + + def __getitem__(self, item): + if is_integer(item): + if self._mask[item]: + return self.dtype.na_value + return self._data[item] + return type(self)(self._data[item], + mask=self._mask[item], + dtype=self.dtype) + + def _coerce_to_ndarray(self): + """ + coerce to an ndarary of object dtype + """ + + # TODO(jreback) make this better + data = self._data.astype(object) + data[self._mask] = self._na_value + return data + + def __array__(self, dtype=None): + """ + the array interface, return my values + We return an object array here to preserve our scalar values + """ + return self._coerce_to_ndarray() + + def __iter__(self): + """Iterate over elements of the array. + + """ + # This needs to be implemented so that pandas recognizes extension + # arrays as list-like. 
The default implementation makes successive + # calls to ``__getitem__``, which may be slower than necessary. + for i in range(len(self)): + if self._mask[i]: + yield self.dtype.na_value + else: + yield self._data[i] + + def _formatting_values(self): + # type: () -> np.ndarray + return self._coerce_to_ndarray() + + def take(self, indexer, allow_fill=False, fill_value=None): + from pandas.api.extensions import take + + # we always fill with 1 internally + # to avoid upcasting + data_fill_value = 1 if isna(fill_value) else fill_value + result = take(self._data, indexer, fill_value=data_fill_value, + allow_fill=allow_fill) + + mask = take(self._mask, indexer, fill_value=True, + allow_fill=allow_fill) + + # if we are filling + # we only fill where the indexer is null + # not existing missing values + # TODO(jreback) what if we have a non-na float as a fill value? + if allow_fill and notna(fill_value): + fill_mask = np.asarray(indexer) == -1 + result[fill_mask] = fill_value + mask = mask ^ fill_mask + + return type(self)(result, mask=mask, dtype=self.dtype, copy=False) + + def copy(self, deep=False): + data, mask = self._data, self._mask + if deep: + data = copy.deepcopy(data) + mask = copy.deepcopy(mask) + else: + data = data.copy() + mask = mask.copy() + return type(self)(data, mask, dtype=self.dtype, copy=False) + + def __setitem__(self, key, value): + _is_scalar = is_scalar(value) + if _is_scalar: + value = [value] + value, mask = coerce_to_array(value, dtype=self.dtype) + + if _is_scalar: + value = value[0] + mask = mask[0] + + self._data[key] = value + self._mask[key] = mask + + def __len__(self): + return len(self._data) + + def __repr__(self): + """ + Return a string representation for this object. + + Invoked by unicode(df) in py2 only. Yields a Unicode String in both + py2/py3. 
+ """ + klass = self.__class__.__name__ + data = format_object_summary(self, default_pprint, False) + attrs = format_object_attrs(self) + space = " " + + prepr = (u(",%s") % + space).join(u("%s=%s") % (k, v) for k, v in attrs) + + res = u("%s(%s%s)") % (klass, data, prepr) + + return res + + @property + def nbytes(self): + return self._data.nbytes + self._mask.nbytes + + def isna(self): + return self._mask + + @property + def _na_value(self): + return np.nan + + @classmethod + def _concat_same_type(cls, to_concat): + data = np.concatenate([x._data for x in to_concat]) + mask = np.concatenate([x._mask for x in to_concat]) + return cls(data, mask=mask, dtype=to_concat[0].dtype) + + def astype(self, dtype, copy=True): + """Cast to a NumPy array or IntegerArray with 'dtype'. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + copy : bool, default True + Whether to copy the data, even if not necessary. If False, + a copy is made only if the old dtype does not match the + new dtype. + + Returns + ------- + array : ndarray or IntegerArray + NumPy ndarray or IntergerArray with 'dtype' for its dtype. + + Raises + ------ + TypeError + if incompatible type with an IntegerDtype, equivalent of same_kind + casting + """ + + # if we are astyping to an existing IntegerDtype we can fastpath + if isinstance(dtype, _IntegerDtype): + result = self._data.astype(dtype.numpy_dtype, + casting='same_kind', copy=False) + return type(self)(result, mask=self._mask, + dtype=dtype, copy=False) + + # coerce + data = self._coerce_to_ndarray() + return data.astype(dtype=dtype, copy=False) + + @property + def _ndarray_values(self): + # type: () -> np.ndarray + """Internal pandas method for lossy conversion to a NumPy ndarray. + + This method is not part of the pandas interface. + + The expectation is that this is cheap to compute, and is primarily + used for interacting with our indexers. 
+ """ + return self._data + + def value_counts(self, dropna=True): + """ + Returns a Series containing counts of each category. + + Every category will have an entry, even those with a count of 0. + + Parameters + ---------- + dropna : boolean, default True + Don't include counts of NaN. + + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + + """ + + from pandas import Index, Series + + # compute counts on the data with no nans + data = self._data[~self._mask] + value_counts = Index(data).value_counts() + array = value_counts.values + + # TODO(extension) + # if we have allow Index to hold an ExtensionArray + # this is easier + index = value_counts.index.astype(object) + + # if we want nans, count the mask + if not dropna: + + # TODO(extension) + # appending to an Index *always* infers + # w/o passing the dtype + array = np.append(array, [self._mask.sum()]) + index = Index(np.concatenate( + [index.values, + np.array([np.nan], dtype=object)]), dtype=object) + + return Series(array, index=index) + + def _values_for_argsort(self): + # type: () -> ndarray + """Return values for sorting. + + Returns + ------- + ndarray + The transformed values should maintain the ordering between values + within the array. 
+ + See Also + -------- + ExtensionArray.argsort + """ + data = self._data.copy() + data[self._mask] = data.min() - 1 + return data + + @classmethod + def _create_comparison_method(cls, op): + def cmp_method(self, other): + + op_name = op.__name__ + mask = None + if isinstance(other, IntegerArray): + other, mask = other._data, other._mask + elif is_list_like(other): + other = np.asarray(other) + if other.ndim > 0 and len(self) != len(other): + raise ValueError('Lengths must match to compare') + + # numpy will show a DeprecationWarning on invalid elementwise + # comparisons, this will raise in the future + with warnings.catch_warnings(record=True): + with np.errstate(all='ignore'): + result = op(self._data, other) + + # nans propagate + if mask is None: + mask = self._mask + else: + mask = self._mask | mask + + result[mask] = True if op_name == 'ne' else False + return result + + name = '__{name}__'.format(name=op.__name__) + return set_function_name(cmp_method, name, cls) + + def _maybe_mask_result(self, result, mask, other, op_name): + """ + Parameters + ---------- + result : array-like + mask : array-like bool + other : scalar or array-like + op_name : str + """ + + # may need to fill infs + # and mask wraparound + if is_float_dtype(result): + mask |= (result == np.inf) | (result == -np.inf) + + # if we have a float operand we are by-definition + # a float result + # or our op is a divide + if ((is_float_dtype(other) or is_float(other)) or + (op_name in ['rtruediv', 'truediv', 'rdiv', 'div'])): + result[mask] = np.nan + return result + + return type(self)(result, mask=mask, dtype=self.dtype, copy=False) + + @classmethod + def _create_arithmetic_method(cls, op): + def integer_arithmetic_method(self, other): + + op_name = op.__name__ + mask = None + if isinstance(other, (ABCSeries, ABCIndexClass)): + other = getattr(other, 'values', other) + + if isinstance(other, IntegerArray): + other, mask = other._data, other._mask + elif getattr(other, 'ndim', 0) > 1: + raise 
NotImplementedError( + "can only perform ops with 1-d structures") + elif is_list_like(other): + other = np.asarray(other) + if not other.ndim: + other = other.item() + elif other.ndim == 1: + if not (is_float_dtype(other) or is_integer_dtype(other)): + raise TypeError( + "can only perform ops with numeric values") + else: + if not (is_float(other) or is_integer(other)): + raise TypeError("can only perform ops with numeric values") + + # nans propagate + if mask is None: + mask = self._mask + else: + mask = self._mask | mask + + with np.errstate(all='ignore'): + result = op(self._data, other) + + # divmod returns a tuple + if op_name == 'divmod': + div, mod = result + return (self._maybe_mask_result(div, mask, other, 'floordiv'), + self._maybe_mask_result(mod, mask, other, 'mod')) + + return self._maybe_mask_result(result, mask, other, op_name) + + name = '__{name}__'.format(name=op.__name__) + return set_function_name(integer_arithmetic_method, name, cls) + + +IntegerArray._add_arithmetic_ops() +IntegerArray._add_comparison_ops() + + +module = sys.modules[__name__] + + +# create the Dtype +_dtypes = {} +for dtype in ['int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64']: + + if dtype.startswith('u'): + name = "U{}".format(dtype[1:].capitalize()) + else: + name = dtype.capitalize() + classname = "{}Dtype".format(name) + attributes_dict = {'type': getattr(np, dtype), + 'name': name} + dtype_type = type(classname, (_IntegerDtype, ), attributes_dict) + setattr(module, classname, dtype_type) + + # register + registry.register(dtype_type) + _dtypes[dtype] = dtype_type() diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index c915b272aee8b..2c8853dec4f69 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -191,8 +191,8 @@ def _simple_new(cls, left, right, closed=None, return result @classmethod - def _from_sequence(cls, scalars): - return cls(scalars) + def _from_sequence(cls, scalars, 
dtype=None, copy=False): + return cls(scalars, dtype=dtype, copy=copy) @classmethod def _from_factorized(cls, values, original): diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 8675d3be06287..ead7b39309f5e 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -651,7 +651,8 @@ def astype_nansafe(arr, dtype, copy=True): # dispatch on extension dtype if needed if is_extension_array_dtype(dtype): - return dtype.array_type._from_sequence(arr, copy=copy) + return dtype.construct_array_type()._from_sequence( + arr, dtype=dtype, copy=copy) if not isinstance(dtype, np.dtype): dtype = pandas_dtype(dtype) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 5a2f91d775fb2..355bf58540219 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1714,6 +1714,11 @@ def is_extension_array_dtype(arr_or_dtype): if isinstance(arr_or_dtype, (ABCIndexClass, ABCSeries)): arr_or_dtype = arr_or_dtype._values + try: + arr_or_dtype = pandas_dtype(arr_or_dtype) + except TypeError: + pass + return isinstance(arr_or_dtype, (ExtensionDtype, ExtensionArray)) @@ -1804,6 +1809,9 @@ def _get_dtype(arr_or_dtype): TypeError : The passed in object is None. """ + # TODO(extension) + # replace with pandas_dtype + if arr_or_dtype is None: raise TypeError("Cannot deduce dtype from null object") if isinstance(arr_or_dtype, np.dtype): @@ -1851,6 +1859,8 @@ def _get_dtype_type(arr_or_dtype): passed in array or dtype object. 
""" + # TODO(extension) + # replace with pandas_dtype if isinstance(arr_or_dtype, np.dtype): return arr_or_dtype.type elif isinstance(arr_or_dtype, type): @@ -1976,7 +1986,17 @@ def pandas_dtype(dtype): Returns ------- np.dtype or a pandas dtype + + Raises + ------ + TypeError if not a dtype + """ + # short-circuit + if isinstance(dtype, np.ndarray): + return dtype.dtype + elif isinstance(dtype, np.dtype): + return dtype # registered extension types result = registry.find(dtype) @@ -1984,13 +2004,19 @@ def pandas_dtype(dtype): return result # un-registered extension types - if isinstance(dtype, ExtensionDtype): + elif isinstance(dtype, ExtensionDtype): return dtype + # try a numpy dtype + # raise a consistent TypeError if failed try: npdtype = np.dtype(dtype) - except (TypeError, ValueError): - raise + except Exception: + # we don't want to force a repr of the non-string + if not isinstance(dtype, string_types): + raise TypeError("data type not understood") + raise TypeError("data type '{}' not understood".format( + dtype)) # Any invalid dtype (such as pd.Timestamp) should raise an error. # np.dtype(invalid_type).kind = 0 for such objects. 
However, this will @@ -2000,6 +2026,6 @@ def pandas_dtype(dtype): if dtype in [object, np.object_, 'object', 'O']: return npdtype elif npdtype.kind == 'O': - raise TypeError('dtype {dtype} not understood'.format(dtype=dtype)) + raise TypeError("dtype '{}' not understood".format(dtype)) return npdtype diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 4a41b14cee071..5768fd361c3db 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -534,6 +534,7 @@ def _concat_index_asobject(to_concat, name=None): to_concat = [x._values if isinstance(x, Index) else x for x in to_concat] + return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 83b70baf4065b..93982b4466a7f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -44,6 +44,7 @@ is_datetime64_any_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, + is_extension_array_dtype, is_hashable, is_iterator, is_list_like, is_scalar) @@ -260,19 +261,33 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, name=name) # categorical - if is_categorical_dtype(data) or is_categorical_dtype(dtype): + elif is_categorical_dtype(data) or is_categorical_dtype(dtype): from .category import CategoricalIndex return CategoricalIndex(data, dtype=dtype, copy=copy, name=name, **kwargs) # interval - if ((is_interval_dtype(data) or is_interval_dtype(dtype)) and - not is_object_dtype(dtype)): + elif ((is_interval_dtype(data) or is_interval_dtype(dtype)) and + not is_object_dtype(dtype)): from .interval import IntervalIndex closed = kwargs.get('closed', None) return IntervalIndex(data, dtype=dtype, name=name, copy=copy, closed=closed) + # extension dtype + elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype): + data = np.asarray(data) + if not (dtype is None or is_object_dtype(dtype)): + + # coerce to the provided dtype + data = 
dtype.construct_array_type()( + data, dtype=dtype, copy=False) + + # coerce to the object dtype + data = data.astype(object) + return Index(data, dtype=object, copy=copy, name=name, + **kwargs) + # index-like elif isinstance(data, (np.ndarray, Index, ABCSeries)): @@ -1170,10 +1185,15 @@ def _to_embed(self, keep_tz=False, dtype=None): def astype(self, dtype, copy=True): if is_dtype_equal(self.dtype, dtype): return self.copy() if copy else self + elif is_categorical_dtype(dtype): from .category import CategoricalIndex return CategoricalIndex(self.values, name=self.name, dtype=dtype, copy=copy) + + elif is_extension_array_dtype(dtype): + return Index(np.asarray(self), dtype=dtype, copy=copy) + try: if is_datetime64tz_dtype(dtype): from pandas import DatetimeIndex diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5a5418dcc1e7f..5a87a8368dc88 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -39,7 +39,8 @@ is_re, is_re_compilable, is_scalar, - _get_dtype) + _get_dtype, + pandas_dtype) from pandas.core.dtypes.cast import ( maybe_downcast_to_dtype, maybe_upcast, @@ -631,9 +632,10 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, return self.make_block(Categorical(self.values, dtype=dtype)) + # convert dtypes if needed + dtype = pandas_dtype(dtype) + # astype processing - if not is_extension_array_dtype(dtype): - dtype = np.dtype(dtype) if is_dtype_equal(self.dtype, dtype): if copy: return self.copy() diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 16820dcbb55bc..a46c19e2d399c 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -638,7 +638,8 @@ def fill_zeros(result, x, y, name, fill): # if we have a fill of inf, then sign it correctly # (GH 6178 and PR 9308) if np.isinf(fill): - signs = np.sign(y if name.startswith(('r', '__r')) else x) + signs = y if name.startswith(('r', '__r')) else x + signs = np.sign(signs.astype('float', copy=False)) negative_inf_mask = (signs.ravel() < 
0) & mask np.putmask(result, negative_inf_mask, -fill) diff --git a/pandas/core/ops.py b/pandas/core/ops.py index bccc5a587bd83..a8c1b954a61b7 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -13,7 +13,7 @@ import numpy as np import pandas as pd -from pandas._libs import algos as libalgos, ops as libops +from pandas._libs import lib, algos as libalgos, ops as libops from pandas import compat from pandas.util._decorators import Appender @@ -135,6 +135,13 @@ def rfloordiv(left, right): def rmod(left, right): + # check if right is a string as % is the string + # formatting operation; this is a TypeError + # otherwise perform the op + if isinstance(right, compat.string_types): + raise TypeError("{typ} cannot perform the operation mod".format( + typ=type(left).__name__)) + return right % left @@ -1018,7 +1025,7 @@ def _align_method_SERIES(left, right, align_asobject=False): return left, right -def _construct_result(left, result, index, name, dtype): +def _construct_result(left, result, index, name, dtype=None): """ If the raw op result has a non-None name (e.g. it is an Index object) and the name argument is None, then passing name to the constructor will @@ -1030,7 +1037,7 @@ def _construct_result(left, result, index, name, dtype): return out -def _construct_divmod_result(left, result, index, name, dtype): +def _construct_divmod_result(left, result, index, name, dtype=None): """divmod returns a tuple of like indexed series instead of a single series. 
""" constructor = left._constructor @@ -1048,16 +1055,39 @@ def dispatch_to_extension_op(op, left, right): # The op calls will raise TypeError if the op is not defined # on the ExtensionArray + # TODO(jreback) + # we need to listify to avoid ndarray, or non-same-type extension array + # dispatching + if is_extension_array_dtype(left): - res_values = op(left.values, right) + + new_left = left.values + if isinstance(right, np.ndarray): + + # handle numpy scalars, this is a PITA + # TODO(jreback) + new_right = lib.item_from_zerodim(right) + if is_scalar(new_right): + new_right = [new_right] + new_right = list(new_right) + elif is_extension_array_dtype(right) and type(left) != type(right): + new_right = list(new_right) + else: + new_right = right + else: - # We know that left is not ExtensionArray and is Series and right is - # ExtensionArray. Want to force ExtensionArray op to get called - res_values = op(list(left.values), right.values) + new_left = list(left.values) + new_right = right + + res_values = op(new_left, new_right) res_name = get_op_result_name(left, right) - return left._constructor(res_values, index=left.index, - name=res_name) + + if op.__name__ == 'divmod': + return _construct_divmod_result( + left, res_values, left.index, res_name) + + return _construct_result(left, res_values, left.index, res_name) def _arith_method_SERIES(cls, op, special): @@ -1074,7 +1104,6 @@ def _arith_method_SERIES(cls, op, special): def na_op(x, y): import pandas.core.computation.expressions as expressions - try: result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) except TypeError: @@ -1095,6 +1124,20 @@ def na_op(x, y): return result def safe_na_op(lvalues, rvalues): + """ + return the result of evaluating na_op on the passed in values + + try coercion to object type if the native types are not compatible + + Parameters + ---------- + lvalues : array-like + rvalues : array-like + + Raises + ------ + TypeError: invalid operation + """ try: with 
np.errstate(all='ignore'): return na_op(lvalues, rvalues) @@ -1105,14 +1148,21 @@ def safe_na_op(lvalues, rvalues): raise def wrapper(left, right): - if isinstance(right, ABCDataFrame): return NotImplemented left, right = _align_method_SERIES(left, right) res_name = get_op_result_name(left, right) - if is_datetime64_dtype(left) or is_datetime64tz_dtype(left): + if is_categorical_dtype(left): + raise TypeError("{typ} cannot perform the operation " + "{op}".format(typ=type(left).__name__, op=str_rep)) + + elif (is_extension_array_dtype(left) or + is_extension_array_dtype(right)): + return dispatch_to_extension_op(op, left, right) + + elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left): result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex) return construct_result(left, result, index=left.index, name=res_name, @@ -1124,15 +1174,6 @@ def wrapper(left, right): index=left.index, name=res_name, dtype=result.dtype) - elif is_categorical_dtype(left): - raise TypeError("{typ} cannot perform the operation " - "{op}".format(typ=type(left).__name__, op=str_rep)) - - elif (is_extension_array_dtype(left) or - (is_extension_array_dtype(right) and - not is_categorical_dtype(right))): - return dispatch_to_extension_op(op, left, right) - lvalues = left.values rvalues = right if isinstance(rvalues, ABCSeries): @@ -1204,6 +1245,9 @@ def _comp_method_SERIES(cls, op, special): masker = _gen_eval_kwargs(op_name).get('masker', False) def na_op(x, y): + # TODO: + # should have guarantess on what x, y can be type-wise + # Extension Dtypes are not called here # dispatch to the categorical if we have a categorical # in either operand @@ -1312,7 +1356,7 @@ def wrapper(self, other, axis=None): elif (is_extension_array_dtype(self) or (is_extension_array_dtype(other) and - not is_categorical_dtype(other))): + not is_scalar(other))): return dispatch_to_extension_op(op, self, other) elif isinstance(other, ABCSeries): diff --git a/pandas/core/series.py b/pandas/core/series.py 
index 77445159129f2..3571e908fc6a7 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -236,13 +236,8 @@ def __init__(self, data=None, index=None, dtype=None, name=None, '`index` argument. `copy` must ' 'be False.') - elif is_extension_array_dtype(data) and dtype is not None: - if not data.dtype.is_dtype(dtype): - raise ValueError("Cannot specify a dtype '{}' with an " - "extension array of a different " - "dtype ('{}').".format(dtype, - data.dtype)) - + elif is_extension_array_dtype(data): + pass elif (isinstance(data, types.GeneratorType) or (compat.PY3 and isinstance(data, map))): data = list(data) @@ -4096,7 +4091,7 @@ def _try_cast(arr, take_fast_path): elif is_extension_array_dtype(dtype): # create an extension array from its dtype array_type = dtype.construct_array_type() - subarr = array_type(subarr, copy=copy) + subarr = array_type(subarr, dtype=dtype, copy=copy) elif dtype is not None and raise_cast_failure: raise @@ -4133,10 +4128,7 @@ def _try_cast(arr, take_fast_path): subarr = data if dtype is not None and not data.dtype.is_dtype(dtype): - msg = ("Cannot coerce extension array to dtype '{typ}'. 
" - "Do the coercion before passing to the constructor " - "instead.".format(typ=dtype)) - raise ValueError(msg) + subarr = data.astype(dtype) if copy: subarr = data.copy() diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py index 640b894e2245f..b6b81bb941a59 100644 --- a/pandas/tests/extension/base/__init__.py +++ b/pandas/tests/extension/base/__init__.py @@ -47,7 +47,7 @@ class TestMyDtype(BaseDtypeTests): from .groupby import BaseGroupbyTests # noqa from .interface import BaseInterfaceTests # noqa from .methods import BaseMethodsTests # noqa -from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests # noqa +from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests, BaseOpsUtil # noqa from .missing import BaseMissingTests # noqa from .reshaping import BaseReshapingTests # noqa from .setitem import BaseSetitemTests # noqa diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py index 52a12816c8722..2125458e8a0ba 100644 --- a/pandas/tests/extension/base/dtype.py +++ b/pandas/tests/extension/base/dtype.py @@ -54,3 +54,28 @@ def test_array_type(self, data, dtype): def test_array_type_with_arg(self, data, dtype): with pytest.raises(NotImplementedError): dtype.construct_array_type('foo') + + def test_check_dtype(self, data): + dtype = data.dtype + + # check equivalency for using .dtypes + df = pd.DataFrame({'A': pd.Series(data, dtype=dtype), + 'B': data, + 'C': 'foo', 'D': 1}) + + # np.dtype('int64') == 'Int64' == 'int64' + # so can't distinguish + if dtype.name == 'Int64': + expected = pd.Series([True, True, False, True], + index=list('ABCD')) + else: + expected = pd.Series([True, True, False, False], + index=list('ABCD')) + + result = df.dtypes == str(dtype) + self.assert_series_equal(result, expected) + + expected = pd.Series([True, True, False, False], + index=list('ABCD')) + result = df.dtypes.apply(str) == str(dtype) + self.assert_series_equal(result, expected) diff --git 
a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py index e9df49780f119..886a0f66b5f66 100644 --- a/pandas/tests/extension/base/getitem.py +++ b/pandas/tests/extension/base/getitem.py @@ -226,12 +226,14 @@ def test_reindex(self, data, na_value): n = len(data) result = s.reindex([-1, 0, n]) expected = pd.Series( - data._from_sequence([na_value, data[0], na_value]), + data._from_sequence([na_value, data[0], na_value], + dtype=s.dtype), index=[-1, 0, n]) self.assert_series_equal(result, expected) result = s.reindex([n, n + 1]) - expected = pd.Series(data._from_sequence([na_value, na_value]), + expected = pd.Series(data._from_sequence([na_value, na_value], + dtype=s.dtype), index=[n, n + 1]) self.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index 659b9757ac1e3..f7bfdb8ec218a 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -3,10 +3,12 @@ import operator import pandas as pd +from pandas.core import ops from .base import BaseExtensionTests class BaseOpsUtil(BaseExtensionTests): + def get_op_from_name(self, op_name): short_opname = op_name.strip('_') try: @@ -32,17 +34,38 @@ def _check_op(self, s, op, other, exc=NotImplementedError): with pytest.raises(exc): op(s, other) + def _check_divmod_op(self, s, op, other, exc=NotImplementedError): + # divmod has multiple return values, so check separatly + if exc is None: + result_div, result_mod = op(s, other) + if op is divmod: + expected_div, expected_mod = s // other, s % other + else: + expected_div, expected_mod = other // s, other % s + self.assert_series_equal(result_div, expected_div) + self.assert_series_equal(result_mod, expected_mod) + else: + with pytest.raises(exc): + divmod(s, other) + class BaseArithmeticOpsTests(BaseOpsUtil): """Various Series and DataFrame arithmetic ops methods.""" - def test_arith_scalar(self, data, all_arithmetic_operators): - # scalar + def 
test_arith_series_with_scalar(self, data, all_arithmetic_operators): + # series & scalar op_name = all_arithmetic_operators s = pd.Series(data) self.check_opname(s, op_name, s.iloc[0], exc=TypeError) - def test_arith_array(self, data, all_arithmetic_operators): + @pytest.mark.xfail(run=False, reason="_reduce needs implementation") + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): + # frame & scalar + op_name = all_arithmetic_operators + df = pd.DataFrame({'A': data}) + self.check_opname(df, op_name, data[0], exc=TypeError) + + def test_arith_series_with_array(self, data, all_arithmetic_operators): # ndarray & other series op_name = all_arithmetic_operators s = pd.Series(data) @@ -50,8 +73,8 @@ def test_arith_array(self, data, all_arithmetic_operators): def test_divmod(self, data): s = pd.Series(data) - self._check_op(s, divmod, 1, exc=TypeError) - self._check_op(1, divmod, s, exc=TypeError) + self._check_divmod_op(s, divmod, 1, exc=TypeError) + self._check_divmod_op(1, ops.rdivmod, s, exc=TypeError) def test_error(self, data, all_arithmetic_operators): # invalid ops diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py index c83726c5278a5..0340289e0b674 100644 --- a/pandas/tests/extension/base/reshaping.py +++ b/pandas/tests/extension/base/reshaping.py @@ -82,7 +82,8 @@ def test_concat_columns(self, data, na_value): # non-aligned df2 = pd.DataFrame({'B': [1, 2, 3]}, index=[1, 2, 3]) expected = pd.DataFrame({ - 'A': data._from_sequence(list(data[:3]) + [na_value]), + 'A': data._from_sequence(list(data[:3]) + [na_value], + dtype=data.dtype), 'B': [np.nan, 1, 2, 3]}) result = pd.concat([df1, df2], axis=1) @@ -96,8 +97,10 @@ def test_align(self, data, na_value): r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3])) # Assumes that the ctor can take a list of scalars of the type - e1 = pd.Series(data._from_sequence(list(a) + [na_value])) - e2 = pd.Series(data._from_sequence([na_value] + list(b))) + 
e1 = pd.Series(data._from_sequence(list(a) + [na_value], + dtype=data.dtype)) + e2 = pd.Series(data._from_sequence([na_value] + list(b), + dtype=data.dtype)) self.assert_series_equal(r1, e1) self.assert_series_equal(r2, e2) @@ -109,8 +112,10 @@ def test_align_frame(self, data, na_value): ) # Assumes that the ctor can take a list of scalars of the type - e1 = pd.DataFrame({'A': data._from_sequence(list(a) + [na_value])}) - e2 = pd.DataFrame({'A': data._from_sequence([na_value] + list(b))}) + e1 = pd.DataFrame({'A': data._from_sequence(list(a) + [na_value], + dtype=data.dtype)}) + e2 = pd.DataFrame({'A': data._from_sequence([na_value] + list(b), + dtype=data.dtype)}) self.assert_frame_equal(r1, e1) self.assert_frame_equal(r2, e2) @@ -120,7 +125,8 @@ def test_align_series_frame(self, data, na_value): df = pd.DataFrame({"col": np.arange(len(ser) + 1)}) r1, r2 = ser.align(df) - e1 = pd.Series(data._from_sequence(list(data) + [na_value]), + e1 = pd.Series(data._from_sequence(list(data) + [na_value], + dtype=data.dtype), name=ser.name) self.assert_series_equal(r1, e1) @@ -153,7 +159,8 @@ def test_merge(self, data, na_value): res = pd.merge(df1, df2) exp = pd.DataFrame( {'int1': [1, 1, 2], 'int2': [1, 2, 3], 'key': [0, 0, 1], - 'ext': data._from_sequence([data[0], data[0], data[1]])}) + 'ext': data._from_sequence([data[0], data[0], data[1]], + dtype=data.dtype)}) self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']]) res = pd.merge(df1, df2, how='outer') @@ -161,5 +168,6 @@ def test_merge(self, data, na_value): {'int1': [1, 1, 2, 3, np.nan], 'int2': [1, 2, 3, np.nan, 4], 'key': [0, 0, 1, 2, 3], 'ext': data._from_sequence( - [data[0], data[0], data[1], data[2], na_value])}) + [data[0], data[0], data[1], data[2], na_value], + dtype=data.dtype)}) self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']]) diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/category/test_categorical.py index 715e8bd40a2d0..76f6b03907ef8 
100644 --- a/pandas/tests/extension/category/test_categorical.py +++ b/pandas/tests/extension/category/test_categorical.py @@ -189,11 +189,12 @@ class TestCasting(base.BaseCastingTests): class TestArithmeticOps(base.BaseArithmeticOpsTests): - def test_arith_scalar(self, data, all_arithmetic_operators): + def test_arith_series_with_scalar(self, data, all_arithmetic_operators): op_name = all_arithmetic_operators if op_name != '__rmod__': - super(TestArithmeticOps, self).test_arith_scalar(data, op_name) + super(TestArithmeticOps, self).test_arith_series_with_scalar( + data, op_name) else: pytest.skip('rmod never called when string is first argument') diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 33adebbbe5780..108b8874b3ac5 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -1,6 +1,5 @@ import decimal import numbers -import random import sys import numpy as np @@ -38,7 +37,7 @@ def construct_from_string(cls, string): class DecimalArray(ExtensionArray, ExtensionScalarOpsMixin): dtype = DecimalDtype() - def __init__(self, values, copy=False): + def __init__(self, values, dtype=None, copy=False): for val in values: if not isinstance(val, self.dtype.type): raise TypeError("All values must be of type " + @@ -54,7 +53,7 @@ def __init__(self, values, copy=False): # self._values = self.values = self.data @classmethod - def _from_sequence(cls, scalars, copy=False): + def _from_sequence(cls, scalars, dtype=None, copy=False): return cls(scalars) @classmethod @@ -117,7 +116,3 @@ def _concat_same_type(cls, to_concat): DecimalArray._add_arithmetic_ops() DecimalArray._add_comparison_ops() - - -def make_data(): - return [decimal.Decimal(random.random()) for _ in range(100)] diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 0832e9f7d08df..bc7237f263b1d 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ 
b/pandas/tests/extension/decimal/test_decimal.py @@ -1,5 +1,6 @@ import decimal +import random import numpy as np import pandas as pd import pandas.util.testing as tm @@ -7,7 +8,11 @@ from pandas.tests.extension import base -from .array import DecimalDtype, DecimalArray, make_data +from .array import DecimalDtype, DecimalArray + + +def make_data(): + return [decimal.Decimal(random.random()) for _ in range(100)] @pytest.fixture @@ -176,35 +181,28 @@ def test_series_constructor_coerce_data_to_extension_dtype_raises(): pd.Series([0, 1, 2], dtype=DecimalDtype()) -def test_series_constructor_with_same_dtype_ok(): +def test_series_constructor_with_dtype(): arr = DecimalArray([decimal.Decimal('10.0')]) result = pd.Series(arr, dtype=DecimalDtype()) expected = pd.Series(arr) tm.assert_series_equal(result, expected) - -def test_series_constructor_coerce_extension_array_to_dtype_raises(): - arr = DecimalArray([decimal.Decimal('10.0')]) - xpr = r"Cannot specify a dtype 'int64' .* \('decimal'\)." - - with tm.assert_raises_regex(ValueError, xpr): - pd.Series(arr, dtype='int64') + result = pd.Series(arr, dtype='int64') + expected = pd.Series([10]) + tm.assert_series_equal(result, expected) -def test_dataframe_constructor_with_same_dtype_ok(): +def test_dataframe_constructor_with_dtype(): arr = DecimalArray([decimal.Decimal('10.0')]) result = pd.DataFrame({"A": arr}, dtype=DecimalDtype()) expected = pd.DataFrame({"A": arr}) tm.assert_frame_equal(result, expected) - -def test_dataframe_constructor_with_different_dtype_raises(): arr = DecimalArray([decimal.Decimal('10.0')]) - - xpr = "Cannot coerce extension array to dtype 'int64'. 
" - with tm.assert_raises_regex(ValueError, xpr): - pd.DataFrame({"A": arr}, dtype='int64') + result = pd.DataFrame({"A": arr}, dtype='int64') + expected = pd.DataFrame({"A": [10]}) + tm.assert_frame_equal(result, expected) class TestArithmeticOps(BaseDecimal, base.BaseArithmeticOpsTests): @@ -213,7 +211,7 @@ def check_opname(self, s, op_name, other, exc=None): super(TestArithmeticOps, self).check_opname(s, op_name, other, exc=None) - def test_arith_array(self, data, all_arithmetic_operators): + def test_arith_series_with_array(self, data, all_arithmetic_operators): op_name = all_arithmetic_operators s = pd.Series(data) diff --git a/pandas/tests/extension/integer/__init__.py b/pandas/tests/extension/integer/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/extension/integer/test_integer.py b/pandas/tests/extension/integer/test_integer.py new file mode 100644 index 0000000000000..451f7488bd38a --- /dev/null +++ b/pandas/tests/extension/integer/test_integer.py @@ -0,0 +1,700 @@ +import numpy as np +import pandas as pd +import pandas.util.testing as tm +import pytest + +from pandas.tests.extension import base +from pandas.api.types import ( + is_integer, is_scalar, is_float, is_float_dtype) +from pandas.core.dtypes.generic import ABCIndexClass + +from pandas.core.arrays import ( + to_integer_array, IntegerArray) +from pandas.core.arrays.integer import ( + Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, + UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype) + + +def make_data(): + return (list(range(8)) + + [np.nan] + + list(range(10, 98)) + + [np.nan] + + [99, 100]) + + +@pytest.fixture(params=[Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, + UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype]) +def dtype(request): + return request.param() + + +@pytest.fixture +def data(dtype): + return IntegerArray(make_data(), dtype=dtype) + + +@pytest.fixture +def data_missing(dtype): + return IntegerArray([np.nan, 1], dtype=dtype) + + 
+@pytest.fixture +def data_repeated(data): + def gen(count): + for _ in range(count): + yield data + yield gen + + +@pytest.fixture +def data_for_sorting(dtype): + return IntegerArray([1, 2, 0], dtype=dtype) + + +@pytest.fixture +def data_missing_for_sorting(dtype): + return IntegerArray([1, np.nan, 0], dtype=dtype) + + +@pytest.fixture +def na_cmp(): + # we are np.nan + return lambda x, y: np.isnan(x) and np.isnan(y) + + +@pytest.fixture +def na_value(): + return np.nan + + +@pytest.fixture +def data_for_grouping(dtype): + b = 1 + a = 0 + c = 2 + na = np.nan + return IntegerArray([b, b, na, na, a, a, b, c], dtype=dtype) + + +def test_dtypes(dtype): + # smoke tests on auto dtype construction + + if dtype.is_signed_integer: + assert np.dtype(dtype.type).kind == 'i' + else: + assert np.dtype(dtype.type).kind == 'u' + assert dtype.name is not None + + +class BaseInteger(object): + + def assert_index_equal(self, left, right, *args, **kwargs): + + left_na = left.isna() + right_na = right.isna() + + tm.assert_numpy_array_equal(left_na, right_na) + return tm.assert_index_equal(left[~left_na], + right[~right_na], + *args, **kwargs) + + def assert_series_equal(self, left, right, *args, **kwargs): + + left_na = left.isna() + right_na = right.isna() + + tm.assert_series_equal(left_na, right_na) + return tm.assert_series_equal(left[~left_na], + right[~right_na], + *args, **kwargs) + + def assert_frame_equal(self, left, right, *args, **kwargs): + # TODO(EA): select_dtypes + tm.assert_index_equal( + left.columns, right.columns, + exact=kwargs.get('check_column_type', 'equiv'), + check_names=kwargs.get('check_names', True), + check_exact=kwargs.get('check_exact', False), + check_categorical=kwargs.get('check_categorical', True), + obj='{obj}.columns'.format(obj=kwargs.get('obj', 'DataFrame'))) + + integers = (left.dtypes == 'integer').index + + for col in integers: + self.assert_series_equal(left[col], right[col], + *args, **kwargs) + + left = left.drop(columns=integers) + right 
= right.drop(columns=integers) + tm.assert_frame_equal(left, right, *args, **kwargs) + + +class TestDtype(BaseInteger, base.BaseDtypeTests): + + @pytest.mark.skip(reason="using multiple dtypes") + def test_is_dtype_unboxes_dtype(self): + # we have multiple dtypes, so skip + pass + + def test_array_type_with_arg(self, data, dtype): + assert dtype.construct_array_type() is IntegerArray + + +class TestArithmeticOps(BaseInteger, base.BaseArithmeticOpsTests): + + def _check_divmod_op(self, s, op, other, exc=None): + super(TestArithmeticOps, self)._check_divmod_op(s, op, other, None) + + def _check_op(self, s, op_name, other, exc=None): + op = self.get_op_from_name(op_name) + result = op(s, other) + + # compute expected + mask = s.isna() + + # other array is an Integer + if isinstance(other, IntegerArray): + omask = getattr(other, 'mask', None) + mask = getattr(other, 'data', other) + if omask is not None: + mask |= omask + + # float result type or float op + if ((is_float_dtype(other) or is_float(other) or + op_name in ['__rtruediv__', '__truediv__', + '__rdiv__', '__div__'])): + rs = s.astype('float') + expected = op(rs, other) + self._check_op_float(result, expected, mask, s, op_name, other) + + # integer result type + else: + rs = pd.Series(s.values._data) + expected = op(rs, other) + self._check_op_integer(result, expected, mask, s, op_name, other) + + def _check_op_float(self, result, expected, mask, s, op_name, other): + # check comparisions that are resulting in float dtypes + + expected[mask] = np.nan + self.assert_series_equal(result, expected) + + def _check_op_integer(self, result, expected, mask, s, op_name, other): + # check comparisions that are resulting in integer dtypes + + # to compare properly, we convert the expected + # to float, mask to nans and convert infs + # if we have uints then we process as uints + # then conert to float + # and we ultimately want to create a IntArray + # for comparisons + + fill_value = 0 + + # mod/rmod turn floating 0 into 
NaN while + # integer works as expected (no nan) + if op_name in ['__mod__', '__rmod__']: + if is_scalar(other): + if other == 0: + expected[s.values == 0] = 0 + else: + expected = expected.fillna(0) + else: + expected[(s.values == 0) & + ((expected == 0) | expected.isna())] = 0 + + try: + expected[(expected == np.inf) | (expected == -np.inf)] = fill_value + original = expected + expected = expected.astype(s.dtype) + + except ValueError: + + expected = expected.astype(float) + expected[(expected == np.inf) | (expected == -np.inf)] = fill_value + original = expected + expected = expected.astype(s.dtype) + + expected[mask] = np.nan + + # assert that the expected astype is ok + # (skip for unsigned as they have wrap around) + if not s.dtype.is_unsigned_integer: + original = pd.Series(original) + + # we need to fill with 0's to emulate what an astype('int') does + # (truncation) for certain ops + if op_name in ['__rtruediv__', '__rdiv__']: + mask |= original.isna() + original = original.fillna(0).astype('int') + + original = original.astype('float') + original[mask] = np.nan + self.assert_series_equal(original, expected.astype('float')) + + # assert our expected result + self.assert_series_equal(result, expected) + + def test_arith_integer_array(self, data, all_arithmetic_operators): + # we operate with a rhs of an integer array + + op = all_arithmetic_operators + + s = pd.Series(data) + rhs = pd.Series([1] * len(data), dtype=data.dtype) + rhs.iloc[-1] = np.nan + + self._check_op(s, op, rhs) + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators): + # scalar + op = all_arithmetic_operators + + s = pd.Series(data) + self._check_op(s, op, 1, exc=TypeError) + + @pytest.mark.xfail(run=False, reason="_reduce needs implementation") + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): + # frame & scalar + op = all_arithmetic_operators + + df = pd.DataFrame({'A': data}) + self._check_op(df, op, 1, exc=TypeError) + + def 
test_arith_series_with_array(self, data, all_arithmetic_operators): + # ndarray & other series + op = all_arithmetic_operators + + s = pd.Series(data) + other = np.ones(len(s), dtype=s.dtype.type) + self._check_op(s, op, other, exc=TypeError) + + def test_arith_coerce_scalar(self, data, all_arithmetic_operators): + + op = all_arithmetic_operators + s = pd.Series(data) + + other = 0.01 + self._check_op(s, op, other) + + @pytest.mark.parametrize("other", [1., 1.0, np.array(1.), np.array([1.])]) + def test_arithmetic_conversion(self, all_arithmetic_operators, other): + # if we have a float operand we should have a float result + # if if that is equal to an integer + op = self.get_op_from_name(all_arithmetic_operators) + + s = pd.Series([1, 2, 3], dtype='Int64') + result = op(s, other) + assert result.dtype is np.dtype('float') + + def test_error(self, data, all_arithmetic_operators): + # invalid ops + + op = all_arithmetic_operators + s = pd.Series(data) + ops = getattr(s, op) + opa = getattr(data, op) + + # invalid scalars + with pytest.raises(TypeError): + ops('foo') + with pytest.raises(TypeError): + ops(pd.Timestamp('20180101')) + + # invalid array-likes + with pytest.raises(TypeError): + ops(pd.Series('foo', index=s.index)) + + if op != '__rpow__': + # TODO(extension) + # rpow with a datetimelike coerces the integer array incorrectly + with pytest.raises(TypeError): + ops(pd.Series(pd.date_range('20180101', periods=len(s)))) + + # 2d + with pytest.raises(NotImplementedError): + opa(pd.DataFrame({'A': s})) + with pytest.raises(NotImplementedError): + opa(np.arange(len(s)).reshape(-1, len(s))) + + +class TestComparisonOps(BaseInteger, base.BaseComparisonOpsTests): + + def _compare_other(self, s, data, op_name, other): + op = self.get_op_from_name(op_name) + + # array + result = op(s, other) + expected = pd.Series(op(data._data, other)) + + # fill the nan locations + expected[data._mask] = True if op_name == '__ne__' else False + + tm.assert_series_equal(result, 
expected) + + # series + s = pd.Series(data) + result = op(s, other) + + expected = pd.Series(data._data) + expected = op(expected, other) + + # fill the nan locations + expected[data._mask] = True if op_name == '__ne__' else False + + tm.assert_series_equal(result, expected) + + +class TestInterface(BaseInteger, base.BaseInterfaceTests): + + def test_repr_array(self, data): + result = repr(data) + + # not long + assert '...' not in result + + assert 'dtype=' in result + assert 'IntegerArray' in result + + def test_repr_array_long(self, data): + # some arrays may be able to assert a ... in the repr + with pd.option_context('display.max_seq_items', 1): + result = repr(data) + + assert '...' in result + assert 'length' in result + + +class TestConstructors(BaseInteger, base.BaseConstructorsTests): + + def test_from_dtype_from_float(self, data): + # construct from our dtype & string dtype + dtype = data.dtype + + # from float + expected = pd.Series(data) + result = pd.Series(np.array(data).astype('float'), dtype=str(dtype)) + self.assert_series_equal(result, expected) + + # from int / list + expected = pd.Series(data) + result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) + self.assert_series_equal(result, expected) + + # from int / array + expected = pd.Series(data).dropna().reset_index(drop=True) + dropped = np.array(data.dropna()).astype(np.dtype((dtype.type))) + result = pd.Series(dropped, dtype=str(dtype)) + self.assert_series_equal(result, expected) + + +class TestReshaping(BaseInteger, base.BaseReshapingTests): + + def test_concat_mixed_dtypes(self, data): + # https://github.com/pandas-dev/pandas/issues/20762 + df1 = pd.DataFrame({'A': data[:3]}) + df2 = pd.DataFrame({"A": [1, 2, 3]}) + df3 = pd.DataFrame({"A": ['a', 'b', 'c']}).astype('category') + df4 = pd.DataFrame({"A": pd.SparseArray([1, 2, 3])}) + dfs = [df1, df2, df3, df4] + + # dataframes + result = pd.concat(dfs) + expected = pd.concat([x.astype(object) for x in dfs]) + 
self.assert_frame_equal(result, expected) + + # series + result = pd.concat([x['A'] for x in dfs]) + expected = pd.concat([x['A'].astype(object) for x in dfs]) + self.assert_series_equal(result, expected) + + result = pd.concat([df1, df2]) + expected = pd.concat([df1.astype('object'), df2.astype('object')]) + self.assert_frame_equal(result, expected) + + # concat of an Integer and Int coerces to object dtype + # TODO(jreback) once integrated this would + # be a result of Integer + result = pd.concat([df1['A'], df2['A']]) + expected = pd.concat([df1['A'].astype('object'), + df2['A'].astype('object')]) + self.assert_series_equal(result, expected) + + +class TestGetitem(BaseInteger, base.BaseGetitemTests): + pass + + +class TestMissing(BaseInteger, base.BaseMissingTests): + pass + + +class TestMethods(BaseInteger, base.BaseMethodsTests): + + @pytest.mark.parametrize('dropna', [True, False]) + def test_value_counts(self, all_data, dropna): + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() + expected = pd.Series(other).value_counts( + dropna=dropna).sort_index() + expected.index = expected.index.astype(all_data.dtype) + + self.assert_series_equal(result, expected) + + def test_combine_add(self, data_repeated): + # GH 20825 + orig_data1, orig_data2 = data_repeated(2) + s1 = pd.Series(orig_data1) + s2 = pd.Series(orig_data2) + + # fundamentally this is not a great operation + # as overflow / underflow can easily happen here + # e.g. 
int8 + int8 + def scalar_add(a, b): + + # TODO; should really be a type specific NA + if pd.isna(a) or pd.isna(b): + return np.nan + if is_integer(a): + a = int(a) + elif is_integer(b): + b = int(b) + return a + b + + result = s1.combine(s2, scalar_add) + expected = pd.Series( + orig_data1._from_sequence([scalar_add(a, b) for (a, b) in + zip(orig_data1, + orig_data2)])) + self.assert_series_equal(result, expected) + + val = s1.iloc[0] + result = s1.combine(val, lambda x1, x2: x1 + x2) + expected = pd.Series( + orig_data1._from_sequence([a + val for a in list(orig_data1)])) + self.assert_series_equal(result, expected) + + +class TestCasting(BaseInteger, base.BaseCastingTests): + + @pytest.mark.parametrize('dropna', [True, False]) + def test_construct_index(self, all_data, dropna): + # ensure that we do not coerce to Float64Index, rather + # keep as Index + + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + result = pd.Index(IntegerArray(other, + dtype=all_data.dtype)) + expected = pd.Index(other, dtype=object) + + self.assert_index_equal(result, expected) + + @pytest.mark.parametrize('dropna', [True, False]) + def test_astype_index(self, all_data, dropna): + # as an int/uint index to Index + + all_data = all_data[:10] + if dropna: + other = all_data[~all_data.isna()] + else: + other = all_data + + dtype = all_data.dtype + idx = pd.Index(np.array(other)) + assert isinstance(idx, ABCIndexClass) + + result = idx.astype(dtype) + expected = idx.astype(object).astype(dtype) + self.assert_index_equal(result, expected) + + def test_astype(self, all_data): + all_data = all_data[:10] + + ints = all_data[~all_data.isna()] + mixed = all_data + dtype = Int8Dtype() + + # coerce to same type - ints + s = pd.Series(ints) + result = s.astype(all_data.dtype) + expected = pd.Series(ints) + self.assert_series_equal(result, expected) + + # coerce to same other - ints + s = pd.Series(ints) + result = s.astype(dtype) + 
expected = pd.Series(ints, dtype=dtype) + self.assert_series_equal(result, expected) + + # coerce to same numpy_dtype - ints + s = pd.Series(ints) + result = s.astype(all_data.dtype.numpy_dtype) + expected = pd.Series(ints._data.astype( + all_data.dtype.numpy_dtype)) + tm.assert_series_equal(result, expected) + + # coerce to same type - mixed + s = pd.Series(mixed) + result = s.astype(all_data.dtype) + expected = pd.Series(mixed) + self.assert_series_equal(result, expected) + + # coerce to same other - mixed + s = pd.Series(mixed) + result = s.astype(dtype) + expected = pd.Series(mixed, dtype=dtype) + self.assert_series_equal(result, expected) + + # coerce to same numpy_dtype - mixed + s = pd.Series(mixed) + with pytest.raises(ValueError): + s.astype(all_data.dtype.numpy_dtype) + + # coerce to object + s = pd.Series(mixed) + result = s.astype('object') + expected = pd.Series(np.asarray(mixed)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('dtype', [Int8Dtype(), 'Int8']) + def test_astype_specific_casting(self, dtype): + s = pd.Series([1, 2, 3], dtype='Int64') + result = s.astype(dtype) + expected = pd.Series([1, 2, 3], dtype='Int8') + self.assert_series_equal(result, expected) + + s = pd.Series([1, 2, 3, None], dtype='Int64') + result = s.astype(dtype) + expected = pd.Series([1, 2, 3, None], dtype='Int8') + self.assert_series_equal(result, expected) + + def test_construct_cast_invalid(self, dtype): + + msg = "cannot safely" + arr = [1.2, 2.3, 3.7] + with tm.assert_raises_regex(TypeError, msg): + IntegerArray(arr, dtype=dtype) + + with tm.assert_raises_regex(TypeError, msg): + pd.Series(arr).astype(dtype) + + arr = [1.2, 2.3, 3.7, np.nan] + with tm.assert_raises_regex(TypeError, msg): + IntegerArray(arr, dtype=dtype) + + with tm.assert_raises_regex(TypeError, msg): + pd.Series(arr).astype(dtype) + + +class TestGroupby(BaseInteger, base.BaseGroupbyTests): + + @pytest.mark.xfail(reason="groupby not working") + def 
test_groupby_extension_no_sort(self, data_for_grouping): + super(TestGroupby, self).test_groupby_extension_no_sort( + data_for_grouping) + + @pytest.mark.xfail(reason="groupby not working") + @pytest.mark.parametrize('as_index', [True, False]) + def test_groupby_extension_agg(self, as_index, data_for_grouping): + super(TestGroupby, self).test_groupby_extension_agg( + as_index, data_for_grouping) + + +def test_frame_repr(data_missing): + + df = pd.DataFrame({'A': data_missing}) + result = repr(df) + expected = ' A\n0 NaN\n1 1' + assert result == expected + + +def test_conversions(data_missing): + + # astype to object series + df = pd.DataFrame({'A': data_missing}) + result = df['A'].astype('object') + expected = pd.Series(np.array([np.nan, 1], dtype=object), name='A') + tm.assert_series_equal(result, expected) + + # convert to object ndarray + # we assert that we are exactly equal + # including type conversions of scalars + result = df['A'].astype('object').values + expected = np.array([np.nan, 1], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + for r, e in zip(result, expected): + if pd.isnull(r): + assert pd.isnull(e) + elif is_integer(r): + # PY2 can be int or long + assert r == e + assert is_integer(e) + else: + assert r == e + assert type(r) == type(e) + + +@pytest.mark.parametrize( + 'values', + [ + ['foo', 'bar'], + 'foo', + 1, + 1.0, + pd.date_range('20130101', periods=2), + np.array(['foo'])]) +def test_to_integer_array_error(values): + # error in converting existing arrays to IntegerArrays + with pytest.raises(TypeError): + to_integer_array(values) + + +@pytest.mark.parametrize( + 'values, to_dtype, result_dtype', + [ + (np.array([1], dtype='int64'), None, Int64Dtype), + (np.array([1, np.nan]), None, Int64Dtype), + (np.array([1, np.nan]), 'int8', Int8Dtype)]) +def test_to_integer_array(values, to_dtype, result_dtype): + # convert existing arrays to IntegerArrays + result = to_integer_array(values, dtype=to_dtype) + expected = 
IntegerArray(values, dtype=result_dtype()) + tm.assert_extension_array_equal(result, expected) + + +def test_cross_type_arithmetic(): + + df = pd.DataFrame({'A': pd.Series([1, 2, np.nan], dtype='Int64'), + 'B': pd.Series([1, np.nan, 3], dtype='UInt8'), + 'C': [1, 2, 3]}) + + result = df.A + df.C + expected = pd.Series([2, 4, np.nan], dtype='Int64') + tm.assert_series_equal(result, expected) + + result = (df.A + df.C) * 3 == 12 + expected = pd.Series([False, True, False]) + tm.assert_series_equal(result, expected) + + result = df.A + df.B + expected = pd.Series([2, np.nan, np.nan], dtype='Int64') + tm.assert_series_equal(result, expected) + + +# TODO(jreback) - these need testing / are broken + +# shift + +# set_index (destroys type) diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index 160bf259e1e32..34c397252a8bb 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -13,8 +13,6 @@ import collections import itertools import numbers -import random -import string import sys import numpy as np @@ -54,7 +52,7 @@ def construct_from_string(cls, string): class JSONArray(ExtensionArray): dtype = JSONDtype() - def __init__(self, values, copy=False): + def __init__(self, values, dtype=None, copy=False): for val in values: if not isinstance(val, self.dtype.type): raise TypeError("All values must be of type " + @@ -69,7 +67,7 @@ def __init__(self, values, copy=False): # self._values = self.values = self.data @classmethod - def _from_sequence(cls, scalars, copy=False): + def _from_sequence(cls, scalars, dtype=None, copy=False): return cls(scalars) @classmethod @@ -180,10 +178,3 @@ def _values_for_argsort(self): # cast them to an (N, P) array, instead of an (N,) array of tuples. frozen = [()] + list(tuple(x.items()) for x in self) return np.array(frozen, dtype=object)[1:] - - -def make_data(): - # TODO: Use a regular dict. 
See _NDFrameIndexer._setitem_with_indexer - return [collections.UserDict([ - (random.choice(string.ascii_letters), random.randint(0, 100)) - for _ in range(random.randint(0, 10))]) for _ in range(100)] diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index 7eeaf7946663e..520c303f1990b 100644 --- a/pandas/tests/extension/json/test_json.py +++ b/pandas/tests/extension/json/test_json.py @@ -1,5 +1,7 @@ import operator import collections +import random +import string import pytest @@ -8,11 +10,18 @@ from pandas.compat import PY2, PY36 from pandas.tests.extension import base -from .array import JSONArray, JSONDtype, make_data +from .array import JSONArray, JSONDtype pytestmark = pytest.mark.skipif(PY2, reason="Py2 doesn't have a UserDict") +def make_data(): + # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer + return [collections.UserDict([ + (random.choice(string.ascii_letters), random.randint(0, 100)) + for _ in range(random.randint(0, 10))]) for _ in range(100)] + + @pytest.fixture def dtype(): return JSONDtype() @@ -203,7 +212,8 @@ def test_combine_add(self, data_repeated): class TestCasting(BaseJSON, base.BaseCastingTests): - @pytest.mark.xfail + + @pytest.mark.xfail(reason="failing on np.array(self, dtype=str)") def test_astype_str(self): """This currently fails in NumPy on np.array(self, dtype=str) with diff --git a/pandas/tests/extension/test_common.py b/pandas/tests/extension/test_common.py index 44b818be84e31..b6223ea96d7dd 100644 --- a/pandas/tests/extension/test_common.py +++ b/pandas/tests/extension/test_common.py @@ -22,7 +22,16 @@ def __array__(self, dtype): @property def dtype(self): - return self.data.dtype + return DummyDtype() + + def astype(self, dtype, copy=True): + # we don't support anything but a single dtype + if isinstance(dtype, DummyDtype): + if copy: + return type(self)(self.data) + return self + + return np.array(self, dtype=dtype, copy=copy) class 
TestExtensionArrayDtype(object): @@ -61,10 +70,10 @@ def test_astype_no_copy(): arr = DummyArray(np.array([1, 2, 3], dtype=np.int64)) result = arr.astype(arr.dtype, copy=False) - assert arr.data is result + assert arr is result result = arr.astype(arr.dtype) - assert arr.data is not result + assert arr is not result @pytest.mark.parametrize('dtype', [ diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py index b3a4bfa878c3f..1e96ac730a0eb 100644 --- a/pandas/tests/indexes/interval/test_astype.py +++ b/pandas/tests/indexes/interval/test_astype.py @@ -56,7 +56,7 @@ def test_astype_cannot_cast(self, index, dtype): index.astype(dtype) def test_astype_invalid_dtype(self, index): - msg = 'data type "fake_dtype" not understood' + msg = "data type 'fake_dtype' not understood" with tm.assert_raises_regex(TypeError, msg): index.astype('fake_dtype') diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py index d46e19ef56dd0..8c9d0459eff55 100644 --- a/pandas/tests/indexes/interval/test_construction.py +++ b/pandas/tests/indexes/interval/test_construction.py @@ -139,7 +139,7 @@ def test_generic_errors(self, constructor): constructor(dtype='int64', **filler) # invalid dtype - msg = 'data type "invalid" not understood' + msg = "data type 'invalid' not understood" with tm.assert_raises_regex(TypeError, msg): constructor(dtype='invalid', **filler) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index fe224436c52e6..e95e41bbdeefa 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -226,10 +226,13 @@ def test_constructor_categorical(self): res = Series(cat) tm.assert_categorical_equal(res.values, cat) + # can cast to a new dtype + result = Series(pd.Categorical([1, 2, 3]), + dtype='int64') + expected = pd.Series([1, 2, 3], dtype='int64') + tm.assert_series_equal(result, 
expected) + # GH12574 - pytest.raises( - ValueError, lambda: Series(pd.Categorical([1, 2, 3]), - dtype='int64')) cat = Series(pd.Categorical([1, 2, 3]), dtype='category') assert is_categorical_dtype(cat) assert is_categorical_dtype(cat.dtype)
closes #20700 closes #20747 ``` In [1]: df = pd.DataFrame({ 'A': pd.Series([1, 2, np.nan], dtype='Int64'), 'B': pd.Series([1, np.nan, 3], dtype='UInt8'), 'C': [1, 2, 3]}) In [2]: df Out[2]: A B C 0 1 1 1 1 2 NaN 2 2 NaN 3 3 In [3]: df.dtypes Out[3]: A Int64 B UInt8 C int64 dtype: object In [4]: df.A + df.B Out[4]: 0 2 1 NaN 2 NaN dtype: Int64 In [5]: df.A + df.C Out[5]: 0 2 1 4 2 NaN dtype: Int64 In [6]: (df.A + df.C) * 3 Out[6]: 0 6 1 12 2 NaN dtype: Int64 In [7]: (df.A + df.C) * 3 == 1 Out[7]: 0 False 1 False 2 False dtype: bool In [8]: (df.A + df.C) * 3 == 12 Out[8]: 0 False 1 True 2 False dtype: bool ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21160
2018-05-22T00:00:03Z
2018-07-20T20:44:33Z
2018-07-20T20:44:33Z
2018-07-20T22:21:06Z
TST: Escape invalid escape characters
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 81d775157cf62..5d50c45fe7eca 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -241,7 +241,7 @@ def str_count(arr, pat, flags=0): Escape ``'$'`` to find the literal dollar sign. >>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat']) - >>> s.str.count('\$') + >>> s.str.count('\\$') 0 1 1 0 2 1 @@ -358,7 +358,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): Returning any digit using regular expression. - >>> s1.str.contains('\d', regex=True) + >>> s1.str.contains('\\d', regex=True) 0 False 1 False 2 False
Partially addresses #21137.
https://api.github.com/repos/pandas-dev/pandas/pulls/21154
2018-05-21T16:53:25Z
2018-05-21T23:11:16Z
2018-05-21T23:11:16Z
2018-05-22T00:33:29Z
TST: Remove .ix deprecation warnings
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 6d74ce54faa94..5459b6910e11a 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -397,11 +397,13 @@ def test_getitem_setitem_ix_negative_integers(self): df = DataFrame(np.random.randn(8, 4)) # ix does label-based indexing when having an integer index - with pytest.raises(KeyError): - df.ix[[-1]] + with catch_warnings(record=True): + with pytest.raises(KeyError): + df.ix[[-1]] - with pytest.raises(KeyError): - df.ix[:, [-1]] + with catch_warnings(record=True): + with pytest.raises(KeyError): + df.ix[:, [-1]] # #1942 a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)]) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 7a17408d4468f..9c992770fc64c 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -197,7 +197,7 @@ def test_dups_fancy_indexing(self): # List containing only missing label dfnu = DataFrame(np.random.randn(5, 3), index=list('AABCD')) with pytest.raises(KeyError): - dfnu.ix[['E']] + dfnu.loc[['E']] # ToDo: check_index_type can be True after GH 11497
Partially addresses #21137.
https://api.github.com/repos/pandas-dev/pandas/pulls/21148
2018-05-21T07:42:41Z
2018-05-21T10:34:40Z
2018-05-21T10:34:40Z
2018-05-21T10:43:48Z
Shorter MultiIndex representation
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 5a553264e828b..6ae2a79409c9e 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -15,6 +15,36 @@ and bug fixes. We recommend that all users upgrade to this version. New features ~~~~~~~~~~~~ +.. _whatsnew_0231.enhancements.new_multi_index_repr_: + +MultiIndex now has limits on many levels/labels are shown when printed +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Outputting a :class:`MultiIndex` used to print all level/label of the +multiindex. This could be a problem for large indices as the output could be +slow to print and make the console output difficult to navigate. + +Outputting of ``MultiIndex`` instances now has limits to the number of levels +and labels shown ((:issue:`21145`): + +.. ipython:: python + + index1=range(1000) + pd.MultiIndex.from_arrays([index1, index1]) + +Previously all 1000 index rows would have been shown. + +For smaller number of values, all values will still be shown: + +.. ipython:: python + + index1=range(30) + pd.MultiIndex.from_arrays([index1, index1]) + index1=range(2) + pd.MultiIndex.from_arrays([index1, index1]) + +You can change the cutoff point for when all values are shown in the outputs +by changing :attr:`options.display.max_seq_items` (default is 100). .. 
_whatsnew_0231.deprecations: diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fbcf06a28c1e5..24e44c657d71c 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -7,7 +7,7 @@ import numpy as np from pandas._libs import algos as libalgos, index as libindex, lib, Timestamp -from pandas.compat import range, zip, lrange, lzip, map +from pandas.compat import range, zip, lrange, lzip, map, u from pandas.compat.numpy import function as nv from pandas import compat @@ -609,11 +609,28 @@ def _format_attrs(self): """ Return a list of tuples of the (attr,formatted_value) """ + def to_string_helper(obj, attr_name): + """converts obj.attr_name to a string. + """ + indices = getattr(obj, attr_name) + if attr_name == 'labels': + # self.labels is a list of FrozenNDArray, Index._format_data + # expects a pd.Index + indices = [Index(i) for i in indices] + + _name = u("{}({}=").format(obj.__class__.__name__, attr_name) + attr_string = [idx._format_data(name=_name) + for idx in indices] + attr_string = u("").join(attr_string) + if attr_string.endswith(u(", ")): # else [1, 2, ], want [1, 2] + attr_string = attr_string[:-2] + + return u("[{}]").format(attr_string) + attrs = [ - ('levels', ibase.default_pprint(self._levels, - max_seq_items=False)), - ('labels', ibase.default_pprint(self._labels, - max_seq_items=False))] + ('levels', to_string_helper(self, attr_name='levels')), + ('labels', to_string_helper(self, attr_name='labels')), + ] if com._any_not_none(*self.names): attrs.append(('names', ibase.default_pprint(self.names))) if self.sortorder is not None: diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 37f70090c179f..ae8f3009f1cbd 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -3279,3 +3279,78 @@ def test_duplicate_multiindex_labels(self): with pytest.raises(ValueError): ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]], 
inplace=True) + + def test_repr(self): + # GH21145 + + # no items + idx1, idx2 = range(0), [] + idx = pd.MultiIndex.from_arrays([idx1, idx2]) + expected = """\ +MultiIndex(levels=[[], []], + labels=[[], []])""" + assert repr(idx) == expected + + # two items + idx1, idx2 = [3, 4], [5, 6] + idx = MultiIndex.from_arrays([idx1, idx2]) + expected = """\ +MultiIndex(levels=[[3, 4], [5, 6]], + labels=[[0, 1], [0, 1]])""" + assert repr(idx) == expected + + # 100 items + idx1, idx2 = range(100), range(100) + idx = pd.MultiIndex.from_arrays([idx1, idx2]) + expected = """\ +MultiIndex(levels=[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], + [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], + ], + labels=[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], + [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], + ])""" + assert repr(idx) == expected + + # 1000 items + idx1, idx2 = range(1000), range(1000) + idx = pd.MultiIndex.from_arrays([idx1, idx2]) + expected = """\ +MultiIndex(levels=[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + ... + 990, 991, 992, 993, 994, 995, 996, 997, 998, 999], + [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + ... + 990, 991, 992, 993, 994, 995, 996, 997, 998, 999], + ], + labels=[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + ... + 990, 991, 992, 993, 994, 995, 996, 997, 998, 999], + [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + ... + 990, 991, 992, 993, 994, 995, 996, 997, 998, 999], + ])""" + assert repr(idx) == expected
- [x] closes #12423 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry MultiIndex can currently have quite long repr output, which may also take a long time to print. This PR makes the MultiIndex repr output similar in style to other Index reprs. An alternative would be to output ``labels`` and ``levels`` by supplying ``pd.options.display.max_seq_items`` to ``ibase.default_pprint``, but IMO this is better, as it's more similar to other axis repr's. #### Example with few items in index ```python >>> pd.MultiIndex(levels=[['a', 'b'], ['A', 'B']], labels=[[0, 0, 1, 1], [0, 0, 0, 1]]) MultiIndex(levels=[['a', 'b'], ['A', 'B']], labels=[[0, 0, 1, 1], [0, 0, 0, 1]]) ``` #### Example with many items in index ```python >>> idx=range(1000) >>> pd.MultiIndex.from_arrays([idx, idx]) MultiIndex(levels=[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ... 990, 991, 992, 993, 994, 995, 996, 997, 998, 999], [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ... 990, 991, 992, 993, 994, 995, 996, 997, 998, 999], ], labels=[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ... 990, 991, 992, 993, 994, 995, 996, 997, 998, 999], [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ... 990, 991, 992, 993, 994, 995, 996, 997, 998, 999], ]) ``` If this approach is ok, I can write up whatsnew, tests etc. but would appreciate feedback before I do that. EDIT: tests and whatsnew added.
https://api.github.com/repos/pandas-dev/pandas/pulls/21145
2018-05-20T23:18:09Z
2018-08-26T08:01:40Z
null
2018-10-27T08:16:14Z
BUG: set keyword argument so zipfile actually compresses
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 4876678baaa6e..c02d988a7bc63 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -83,6 +83,7 @@ Indexing I/O ^^^ +- Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`) - Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`) - diff --git a/pandas/io/common.py b/pandas/io/common.py index 0827216975f15..a492b7c0b8e8e 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -5,7 +5,7 @@ import codecs import mmap from contextlib import contextmanager, closing -from zipfile import ZipFile +import zipfile from pandas.compat import StringIO, BytesIO, string_types, text_type from pandas import compat @@ -428,7 +428,7 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None, return f, handles -class BytesZipFile(ZipFile, BytesIO): +class BytesZipFile(zipfile.ZipFile, BytesIO): """ Wrapper for standard library class ZipFile and allow the returned file-like handle to accept byte strings via `write` method. @@ -437,10 +437,10 @@ class BytesZipFile(ZipFile, BytesIO): bytes strings into a member of the archive. 
""" # GH 17778 - def __init__(self, file, mode='r', **kwargs): + def __init__(self, file, mode, compression=zipfile.ZIP_DEFLATED, **kwargs): if mode in ['wb', 'rb']: mode = mode.replace('b', '') - super(BytesZipFile, self).__init__(file, mode, **kwargs) + super(BytesZipFile, self).__init__(file, mode, compression, **kwargs) def write(self, data): super(BytesZipFile, self).writestr(self.filename, data) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 0b329f64dafa3..bb7ee1b911fee 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,12 +1,13 @@ # -*- coding: utf-8 -*- import pytest +import os import collections from functools import partial import numpy as np -from pandas import Series, Timestamp +from pandas import Series, DataFrame, Timestamp from pandas.compat import range, lmap import pandas.core.common as com from pandas.core import ops @@ -222,3 +223,21 @@ def test_standardize_mapping(): dd = collections.defaultdict(list) assert isinstance(com.standardize_mapping(dd), partial) + + +@pytest.mark.parametrize('obj', [ + DataFrame(100 * [[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + columns=['X', 'Y', 'Z']), + Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) +@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv']) +def test_compression_size(obj, method, compression): + if not compression: + pytest.skip("only test compression case.") + + with tm.ensure_clean() as filename: + getattr(obj, method)(filename, compression=compression) + compressed = os.path.getsize(filename) + getattr(obj, method)(filename, compression=None) + uncompressed = os.path.getsize(filename) + assert uncompressed > compressed
- [x] closes https://github.com/pandas-dev/pandas/issues/17778 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry zipfile.ZipFile has default compression mode `zipfile.ZIP_STORED`. It creates an uncompressed archive member. Whilst it doesn't cause issue, it is a strange default to have given users would want to compress files. In order for zip compression to actually reduce file size, keyword argument `compression=zipfile.ZIP_DEFLATED` is added.
https://api.github.com/repos/pandas-dev/pandas/pulls/21144
2018-05-20T16:16:35Z
2018-05-29T10:41:28Z
2018-05-29T10:41:28Z
2018-06-08T17:18:38Z
CLN: Refactor open into context manager in io tests
diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index cfac77291803d..0b1c1ca178762 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -128,9 +128,8 @@ def test_string_io(self): with ensure_clean(self.path) as p: s = df.to_msgpack() - fh = open(p, 'wb') - fh.write(s) - fh.close() + with open(p, 'wb') as fh: + fh.write(s) result = read_msgpack(p) tm.assert_frame_equal(result, df) diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 0b0d4334c86a3..e369dfda6deac 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -75,9 +75,8 @@ def test_from_csv(self): series_h = self.read_csv(path, header=0) assert series_h.name == "series" - outfile = open(path, "w") - outfile.write("1998-01-01|1.0\n1999-01-01|2.0") - outfile.close() + with open(path, "w") as outfile: + outfile.write("1998-01-01|1.0\n1999-01-01|2.0") series = self.read_csv(path, sep="|") check_series = Series({datetime(1998, 1, 1): 1.0,
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Building on #21105 by @WillAyd, I cleaned up some more `open` statements.
https://api.github.com/repos/pandas-dev/pandas/pulls/21139
2018-05-19T23:21:01Z
2018-05-21T10:37:53Z
2018-05-21T10:37:53Z
2018-05-21T12:00:47Z
DOC: Improve the docstring of Timedelta.delta redux
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index f7bb6c1dbb304..3f0b4db87e5ed 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -760,7 +760,32 @@ cdef class _Timedelta(timedelta): @property def delta(self): - """ return out delta in ns (for internal compat) """ + """ + Return the timedelta in nanoseconds (ns), for internal compatibility. + + Returns + ------- + int + Timedelta in nanoseconds. + + Examples + -------- + >>> td = pd.Timedelta('1 days 42 ns') + >>> td.delta + 86400000000042 + + >>> td = pd.Timedelta('3 s') + >>> td.delta + 3000000000 + + >>> td = pd.Timedelta('3 ms 5 us') + >>> td.delta + 3005000 + + >>> td = pd.Timedelta(42, unit='ns') + >>> td.delta + 42 + """ return self.value @property
- [ ] closes #xxxx - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry rather than trying to pick and choose items from my previous PR #21135 (which somehow had a number of other items sucked into it) ... gonna start from scratch here and close out the old PR. tried to incorporate changes noted in previous conversations with @WillAyd and @jreback (thanks for the guidance!) ``` ################################################################################ ###################### Docstring (pandas.Timedelta.delta) ###################### ################################################################################ Return the timedelta in nanoseconds (ns), for internal compatibility. Returns ------- int Timedelta in nanoseconds. Examples -------- >>> td = pd.Timedelta('1 days 42 ns') >>> td.delta 86400000000042 >>> td = pd.Timedelta('3 s') >>> td.delta 3000000000 >>> td = pd.Timedelta('3 ms 5 us') >>> td.delta 3005000 >>> td = pd.Timedelta(42, unit='ns') >>> td.delta 42 ################################################################################ ################################## Validation ################################## ################################################################################ ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21138
2018-05-19T20:12:03Z
2018-05-21T11:00:05Z
2018-05-21T11:00:04Z
2018-06-08T17:11:44Z
DOC: Improve docstring of Timedelta.delta
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index f7bb6c1dbb304..4ecbe672dcc16 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -760,7 +760,36 @@ cdef class _Timedelta(timedelta): @property def delta(self): - """ return out delta in ns (for internal compat) """ + """ + Return the timedelta in nanoseconds (ns), for internal compatibility. + + Returns + ------- + int + Timedelta in nanoseconds. + + Examples + -------- + **Using string input** + + >>> td = pd.Timedelta('1 days 42 ns') + >>> td.delta + 86400000000042 + + >>> td = pd.Timedelta('3 s') + >>> td.delta + 3000000000 + + >>> td = pd.Timedelta('3 ms 5 us') + >>> td.delta + 3005000 + + **Using integer input** + + >>> td = pd.Timedelta(42, unit='ns') + >>> td.delta + 42 + """ return self.value @property @@ -770,7 +799,48 @@ cdef class _Timedelta(timedelta): @property def resolution(self): - """ return a string representing the lowest resolution that we have """ + """ + Return a string representing the lowest (i.e. smallest) time resolution. + + Each timedelta has a defined resolution that represents the lowest OR + most granular level of precision. Each level of resolution is + represented by a short string as defined below: + + - Days: 'D' + - Hours: 'H' + - Minutes: 'T' + - Seconds: 'S' + - Milliseconds: 'L' + - Microseconds: 'U' + - Nanoseconds: 'N' + + Returns + ------- + str + Time resolution. + + Examples + -------- + **Using string input** + + >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns') + >>> td.resolution + 'N' + + >>> td = pd.Timedelta('1 days 2 min 3 us') + >>> td.resolution + 'U' + + >>> td = pd.Timedelta('2 min 3 s') + >>> td.resolution + 'S' + + **Using integer input** + + >>> td = pd.Timedelta(36, unit='us') + >>> td.resolution + 'U' + """ self._ensure_components() if self._ns:
- [ ] closes #xxxx - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.pyx" | flake8 --diff` - [ ] whatsnew entry ``` ################################################################################ ###################### Docstring (pandas.Timedelta.delta) ###################### ################################################################################ Return the timedelta in nanoseconds (ns), for internal compatibility. Returns ------- int Timedelta in nanoseconds. Examples -------- **Using string input** >>> td = pd.Timedelta('1 days 42 ns') >>> td.delta 86400000000042 >>> td = pd.Timedelta('3 s') >>> td.delta 3000000000 >>> td = pd.Timedelta('3 ms 5 us') >>> td.delta 3005000 **Using integer input** >>> td = pd.Timedelta(42, unit='ns') >>> td.delta 42 ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No extended summary found See Also section not found ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21135
2018-05-19T15:48:47Z
2018-05-19T20:13:41Z
null
2018-05-19T20:13:41Z
BUG: Should not raise error in concatenating Series with numpy scalar and tuple names (GH21015)
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 9382d74f95295..5973ad2ebf43f 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -82,7 +82,7 @@ Plotting Reshaping ^^^^^^^^^ -- +- Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`) - Categorical diff --git a/pandas/core/common.py b/pandas/core/common.py index b9182bfd2cbe2..1de8269c9a0c6 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -55,8 +55,11 @@ def flatten(l): def _consensus_name_attr(objs): name = objs[0].name for obj in objs[1:]: - if obj.name != name: - return None + try: + if obj.name != name: + name = None + except ValueError: + name = None return name diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index f5e58fa70e1c4..dea305d4b3fee 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2487,3 +2487,14 @@ def test_concat_aligned_sort_does_not_raise(): columns=[1, 'a']) result = pd.concat([df, df], ignore_index=True, sort=True) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("s1name,s2name", [ + (np.int64(190), (43, 0)), (190, (43, 0))]) +def test_concat_series_name_npscalar_tuple(s1name, s2name): + # GH21015 + s1 = pd.Series({'a': 1, 'b': 2}, name=s1name) + s2 = pd.Series({'c': 5, 'd': 6}, name=s2name) + result = pd.concat([s1, s2]) + expected = pd.Series({'a': 1, 'b': 2, 'c': 5, 'd': 6}) + tm.assert_series_equal(result, expected)
- [x] closes #21015 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21132
2018-05-19T13:26:57Z
2018-05-21T10:41:20Z
2018-05-21T10:41:20Z
2018-06-08T17:11:01Z
Spell check
diff --git a/pandas/core/base.py b/pandas/core/base.py index aa051c6f5eaef..c331ead8d2fef 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -114,7 +114,7 @@ def _reset_cache(self, key=None): def __sizeof__(self): """ - Generates the total memory usage for a object that returns + Generates the total memory usage for an object that returns either a value or Series of values """ if hasattr(self, 'memory_usage'):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21130
2018-05-19T04:24:56Z
2018-05-19T17:46:10Z
2018-05-19T17:46:10Z
2018-06-08T17:09:19Z
DEPR: Add deprecated index attribute names to deprecation list
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 9382d74f95295..c8a2076064c02 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -44,6 +44,8 @@ Documentation Changes Bug Fixes ~~~~~~~~~ +- tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`) + Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index c638b9e4ea117..7a853d575aa69 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -12,7 +12,8 @@ class DirNamesMixin(object): _accessors = frozenset([]) - _deprecations = frozenset(['asobject']) + _deprecations = frozenset( + ['asobject', 'base', 'data', 'flags', 'itemsize', 'strides']) def _dir_deletions(self): """ delete unwanted __dir__ for this object """ diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index f4fa547574b9e..1e4dd2921b3f5 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2088,6 +2088,17 @@ def test_get_duplicates_deprecated(self): with tm.assert_produces_warning(FutureWarning): index.get_duplicates() + def test_tab_complete_warning(self, ip): + # https://github.com/pandas-dev/pandas/issues/16409 + pytest.importorskip('IPython', minversion="6.0.0") + from IPython.core.completer import provisionalcompleter + + code = "import pandas as pd; idx = pd.Index([1, 2])" + ip.run_code(code) + with tm.assert_produces_warning(None): + with provisionalcompleter('ignore'): + list(ip.Completer.completions('idx.', 4)) + class TestMixedIntIndex(Base): # Mostly the tests from common.py for which the results differ
In ipython, when you press tab (e.g. ``idx.<tab>``) a long list of deprecations shows up: ``` C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.base is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.data is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.flags is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.itemsize is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.strides is deprecated and will be removed in a future version getattr(obj, name) ``` With this PR we avoid getting that list of deprecations in iPython. I'm not sure if/where this should go in the whatsnew document.
https://api.github.com/repos/pandas-dev/pandas/pulls/21125
2018-05-18T20:57:34Z
2018-05-21T10:44:49Z
2018-05-21T10:44:49Z
2018-10-27T08:16:31Z
DEPR: Add deprecated index attribute names to deprecation list
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 35d150dc263b8..d45b4e19c6aac 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -30,6 +30,7 @@ Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Improved performance of :meth:`CategoricalIndex.is_monotonic_increasing`, :meth:`CategoricalIndex.is_monotonic_decreasing` and :meth:`CategoricalIndex.is_monotonic` (:issue:`21025`) +- Improved performance of :meth:`CategoricalIndex.is_unique` (:issue:`21107`) - - diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index c638b9e4ea117..7a853d575aa69 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -12,7 +12,8 @@ class DirNamesMixin(object): _accessors = frozenset([]) - _deprecations = frozenset(['asobject']) + _deprecations = frozenset( + ['asobject', 'base', 'data', 'flags', 'itemsize', 'strides']) def _dir_deletions(self): """ delete unwanted __dir__ for this object """ diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 78b7ae7054248..150eca32e229d 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -378,7 +378,7 @@ def _engine(self): # introspection @cache_readonly def is_unique(self): - return not self.duplicated().any() + return self._engine.is_unique @property def is_monotonic_increasing(self): diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 0e630f69b1a32..a2a4170256088 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -581,6 +581,15 @@ def test_is_monotonic(self, data, non_lexsorted_data): assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing + @pytest.mark.parametrize('values, expected', [ + ([1, 2, 3], True), + ([1, 3, 1], False), + (list('abc'), True), + (list('aba'), False)]) + def test_is_unique(self, values, expected): + ci = CategoricalIndex(values) + assert ci.is_unique is 
expected + def test_duplicates(self): idx = CategoricalIndex([0, 0, 0], name='foo')
In ipython, when you press tab (e.g. ``idx.<tab>`` a long list of deprecations shows up: ``` C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.base is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.data is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.flags is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.itemsize is deprecated and will be removed in a future version getattr(obj, name) C:\Users\TP\Miniconda3\envs\pandas-dev\lib\site-packages\jedi\evaluate\compiled\__init__.py:328: FutureWarning: Int64Index.strides is deprecated and will be removed in a future version getattr(obj, name) ``` This PR we avoid getting that list of deprecations in iPython. I'm not sure if/whre this should go in the whatsnew document.
https://api.github.com/repos/pandas-dev/pandas/pulls/21124
2018-05-18T19:05:51Z
2018-05-18T19:06:19Z
null
2018-05-18T19:06:19Z
ENH: add np.nan funcs to _cython_table
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 5a553264e828b..e08f9809c8f92 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -94,3 +94,9 @@ Categorical ^^^^^^^^^^^ - + +Numeric +^^^^^^^ + +- :meth:`~DataFrame.agg` now correctly handles numpy NaN-aware methods like :meth:`numpy.nansum` (:issue:`19629`) +- :meth:`~DataFrame.agg` now correctly handles built-in methods like ``sum`` when axis=1 (:issue:`21134`) diff --git a/pandas/conftest.py b/pandas/conftest.py index b09cb872a12fb..3eda078a802f4 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -149,3 +149,20 @@ def tz_aware_fixture(request): Fixture for trying explicit timezones: {0} """ return request.param + + +@pytest.fixture( + # params: Python 3.5 randomizes dict access and xdist doesn't like that + # in fixtures. In order to get predetermined values we need to sort + # the list deterministically + # GH 21123 + params=list(sorted(pd.core.base.SelectionMixin._cython_table.items(), + key=lambda x: x[0].__name__)), + ids=lambda x: "({}-{!r})_fixture".format(x[0].__name__, x[1]), +) +def cython_table_items(request): + """ + Fixture for returning the items in + pandas.core.base.SelectionMixin._cython_table + """ + return request.param diff --git a/pandas/core/base.py b/pandas/core/base.py index c331ead8d2fef..874168f5a49c7 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -22,7 +22,8 @@ from pandas.core import common as com, algorithms import pandas.core.nanops as nanops import pandas._libs.lib as lib -from pandas.compat.numpy import function as nv +from pandas.compat.numpy import (function as nv, _np_version_under1p10, + _np_version_under1p12) from pandas.compat import PYPY from pandas.util._decorators import (Appender, cache_readonly, deprecate_kwarg, Substitution) @@ -191,17 +192,31 @@ class SelectionMixin(object): np.all: 'all', np.any: 'any', np.sum: 'sum', + np.nansum: 'sum', np.mean: 'mean', + np.nanmean: 'mean', 
np.prod: 'prod', np.std: 'std', + np.nanstd: 'std', np.var: 'var', + np.nanvar: 'var', np.median: 'median', + np.nanmedian: 'median', np.max: 'max', + np.nanmax: 'max', np.min: 'min', + np.nanmin: 'min', np.cumprod: 'cumprod', - np.cumsum: 'cumsum' + np.cumsum: 'cumsum', } + if not _np_version_under1p10: + _cython_table[np.nanprod] = 'prod' + + if not _np_version_under1p12: + _cython_table[np.nancumprod] = 'cumprod' + _cython_table[np.nancumsum] = 'cumsum' + @property def _selection_name(self): """ @@ -316,13 +331,14 @@ def _try_aggregate_string_function(self, arg, *args, **kwargs): raise ValueError("{arg} is an unknown string function".format(arg=arg)) - def _aggregate(self, arg, *args, **kwargs): + def _aggregate(self, arg, axis=0, *args, **kwargs): """ provide an implementation for the aggregators Parameters ---------- arg : string, dict, function + axis : int *args : args to pass on to the function **kwargs : kwargs to pass on to the function @@ -335,17 +351,18 @@ def _aggregate(self, arg, *args, **kwargs): how can be a string describe the required post-processing, or None if not required """ + obj = self if axis == 0 else self.T is_aggregator = lambda x: isinstance(x, (list, tuple, dict)) is_nested_renamer = False _axis = kwargs.pop('_axis', None) if _axis is None: - _axis = getattr(self, 'axis', 0) + _axis = getattr(obj, 'axis', 0) _level = kwargs.pop('_level', None) if isinstance(arg, compat.string_types): - return self._try_aggregate_string_function(arg, *args, - **kwargs), None + return obj._try_aggregate_string_function(arg, *args, + **kwargs), None if isinstance(arg, dict): @@ -353,7 +370,7 @@ def _aggregate(self, arg, *args, **kwargs): if _axis != 0: # pragma: no cover raise ValueError('Can only pass dict with axis=0') - obj = self._selected_obj + selected_obj = obj._selected_obj def nested_renaming_depr(level=4): # deprecation of nested renaming @@ -388,16 +405,16 @@ def nested_renaming_depr(level=4): if isinstance(v, dict): is_nested_renamer = True - 
if k not in obj.columns: + if k not in selected_obj.columns: msg = ('cannot perform renaming for {key} with a ' 'nested dictionary').format(key=k) raise SpecificationError(msg) nested_renaming_depr(4 + (_level or 0)) - elif isinstance(obj, ABCSeries): + elif isinstance(selected_obj, ABCSeries): nested_renaming_depr() - elif isinstance(obj, ABCDataFrame) and \ - k not in obj.columns: + elif isinstance(selected_obj, ABCDataFrame) and \ + k not in selected_obj.columns: raise KeyError( "Column '{col}' does not exist!".format(col=k)) @@ -407,8 +424,8 @@ def nested_renaming_depr(level=4): # deprecation of renaming keys # GH 15931 keys = list(compat.iterkeys(arg)) - if (isinstance(obj, ABCDataFrame) and - len(obj.columns.intersection(keys)) != len(keys)): + if (isinstance(selected_obj, ABCDataFrame) and len( + selected_obj.columns.intersection(keys)) != len(keys)): nested_renaming_depr() from pandas.core.reshape.concat import concat @@ -417,7 +434,7 @@ def _agg_1dim(name, how, subset=None): """ aggregate a 1-dim with how """ - colg = self._gotitem(name, ndim=1, subset=subset) + colg = obj._gotitem(name, ndim=1, subset=subset) if colg.ndim != 1: raise SpecificationError("nested dictionary is ambiguous " "in aggregation") @@ -427,8 +444,8 @@ def _agg_2dim(name, how): """ aggregate a 2-dim with how """ - colg = self._gotitem(self._selection, ndim=2, - subset=obj) + colg = obj._gotitem(obj._selection, ndim=2, + subset=selected_obj) return colg.aggregate(how, _level=None) def _agg(arg, func): @@ -458,20 +475,22 @@ def _agg(arg, func): else: - if self._selection is not None: + if obj._selection is not None: keys = None # some selection on the object - elif self._selection is not None: + elif obj._selection is not None: - sl = set(self._selection_list) + sl = set(obj._selection_list) # we are a Series like object, # but may have multiple aggregations if len(sl) == 1: - result = _agg(arg, lambda fname, - agg_how: _agg_1dim(self._selection, agg_how)) + result = _agg( + arg, + 
lambda fname, agg_how: _agg_1dim( + obj._selection, agg_how)) # we are selecting the same set as we are aggregating elif not len(sl - set(keys)): @@ -516,7 +535,7 @@ def is_any_frame(): return concat([result[k] for k in keys], keys=keys, axis=1), True - elif isinstance(self, ABCSeries) and is_any_series(): + elif isinstance(obj, ABCSeries) and is_any_series(): # we have a dict of Series # return a MI Series @@ -541,20 +560,20 @@ def is_any_frame(): # we have a dict of scalars result = Series(result, - name=getattr(self, 'name', None)) + name=getattr(obj, 'name', None)) return result, True elif is_list_like(arg) and arg not in compat.string_types: # we require a list, but not an 'str' - return self._aggregate_multiple_funcs(arg, - _level=_level, - _axis=_axis), None + return obj._aggregate_multiple_funcs(arg, + _level=_level, + _axis=_axis), None else: result = None - f = self._is_cython_func(arg) - if f and not args and not kwargs: - return getattr(self, f)(), None + f = obj._is_cython_func(arg) + if f is not None: + return getattr(obj, f)(*args, **kwargs), None # caller can react return result, True diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b6c33b4f79478..c515b13aaac82 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5818,13 +5818,11 @@ def _gotitem(self, def aggregate(self, func, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) - # TODO: flipped axis result = None - if axis == 0: - try: - result, how = self._aggregate(func, axis=0, *args, **kwargs) - except TypeError: - pass + try: + result, how = self._aggregate(func, axis=axis, *args, **kwargs) + except TypeError: + pass if result is None: return self.apply(func, axis=axis, args=args, **kwargs) return result diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index df7a5dc9dc173..616345dde2d2f 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -4086,7 +4086,10 @@ def _post_process_cython_aggregate(self, 
obj): def aggregate(self, arg, *args, **kwargs): _level = kwargs.pop('_level', None) - result, how = self._aggregate(arg, _level=_level, *args, **kwargs) + _agg_kwargs = kwargs.copy() + axis = _agg_kwargs.pop('axis', 0) + result, how = self._aggregate(arg, axis, _level=_level, + *args, **_agg_kwargs) if how is None: return result diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index dfb2961befe35..e3c87917ec89f 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -1056,3 +1056,71 @@ def test_non_callable_aggregates(self): expected = df.size assert result == expected + + @pytest.mark.parametrize("inputs", [ + [DataFrame(), { + 'sum': Series(), + 'max': Series(), + 'min': Series(), + 'all': Series(dtype=bool), + 'any': Series(dtype=bool), + 'mean': Series(), + 'prod': Series(), + 'std': Series(), + 'var': Series(), + 'median': Series(), + 'cumprod': DataFrame(), + 'cumsum': DataFrame(), + }], + [DataFrame([[np.nan, 1], [1, 2]]), { + 'sum': Series([1., 3]), + 'max': Series([1., 2]), + 'min': Series([1., 1]), + 'all': Series([True, True]), + 'any': Series([True, True]), + 'mean': Series([1, 1.5]), + 'prod': Series([1., 2]), + 'std': Series([np.nan, 0.707107]), + 'var': Series([np.nan, 0.5]), + 'median': Series([1, 1.5]), + 'cumprod': DataFrame([[np.nan, 1], [1., 2.]]), + 'cumsum': DataFrame([[np.nan, 1], [1., 3.]]), + }], + [DataFrame([['a', 'b'], ['b', 'a']]), { + 'sum': Series(['ab', 'ba']), + 'max': Series(['b', 'b']), + 'min': Series(['a', 'a']), + 'all': Series([True, True]), + 'any': Series([True, True]), + 'mean': Series([], index=pd.Index([], dtype='int64')), + 'prod': Series([], index=pd.Index([], dtype='int64')), + 'std': Series([], index=pd.Index([], dtype='int64')), + 'var': Series([], index=pd.Index([], dtype='int64')), + 'median': Series([], index=pd.Index([], dtype='int64')), + 'cumprod': TypeError, + 'cumsum': DataFrame([['a', 'b'], ['ab', 'ba']]), + }], + ]) + 
@pytest.mark.parametrize("axis", [0, 1], ids=lambda x: "axis {}".format(x)) + def test_agg_function_input(self, cython_table_items, inputs, axis): + # GH21123 + np_func, str_func = cython_table_items + df = inputs[0] + expected = inputs[1][str_func] + + if isinstance(expected, type) and issubclass(expected, Exception): + with pytest.raises(expected): + # e.g. DataFrame(['a b'.split()]).cumprod() will raise + df.agg(np_func, axis=axis) + with pytest.raises(expected): + df.agg(str_func, axis=axis) + return + + result = df.agg(np_func, axis=axis) + result_str_func = df.agg(str_func, axis=axis) + if str_func in ('cumprod', 'cumsum'): + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result_str_func, expected) + else: + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result_str_func, expected) diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index b28b9f342695f..662a411c6fbd3 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -331,6 +331,75 @@ def test_non_callable_aggregates(self): ('mean', 1.5)])) assert_series_equal(result[expected.index], expected) + @pytest.mark.parametrize("inputs", [ + [Series(), { + 'sum': 0, + 'max': np.nan, + 'min': np.nan, + 'all': True, + 'any': False, + 'mean': np.nan, + 'prod': 1, + 'std': np.nan, + 'var': np.nan, + 'median': np.nan, + 'cumprod': Series([], Index([])), + 'cumsum': Series([], Index([])), + }], + [Series([np.nan, 1, 2, 3]), { + 'sum': 6, + 'max': 3, + 'min': 1, + 'all': True, + 'any': True, + 'mean': 2, + 'prod': 6, + 'std': 1, + 'var': 1, + 'median': 2, + 'cumprod': Series([np.nan, 1, 2, 6]), + 'cumsum': Series([np.nan, 1, 3, 6]), + }], + [Series('a b c'.split()), { + 'sum': 'abc', + 'max': 'c', + 'min': 'a', + 'all': 'c', # see GH12863 + 'any': 'a', + 'mean': TypeError, # mean raises TypeError + 'prod': TypeError, + 'std': TypeError, + 'var': TypeError, + 'median': TypeError, + 'cumprod': TypeError, + 'cumsum': 
Series(['a', 'ab', 'abc']), + }], + ]) + def test_agg_function_input(self, inputs, cython_table_items): + # GH21123 + np_func, str_func = cython_table_items + series = inputs[0] + expected = inputs[1][str_func] + + if isinstance(expected, type) and issubclass(expected, Exception): + with pytest.raises(expected): + series.agg(np_func) + with pytest.raises(expected): + series.agg(str_func) + return + + result = series.agg(np_func) + result_str_func = series.agg(str_func) + if str_func in ('cumprod', 'cumsum'): + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result_str_func, expected) + elif tm.is_number(expected): + assert np.isclose(result, expected, equal_nan=True) + assert np.isclose(result_str_func, expected, equal_nan=True) + else: + assert result == expected + assert result_str_func == expected + class TestSeriesMap(TestData):
closes #19629 closes #21134 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This started as a copy of #19670 by @AaronCritchley, but has solved two bugs that the tests surfaced along the way. Bug 1: there is currently a bug in ``df.aggregate``, where the method incorrectly defers to ``df.apply`` in a corner case. This only shows up in the result when using numpy < 1.13 and passing np.nan* functions to ``df.aggregate``. This is the reason for the change in ``base.py`` line 571. (see #8383 for further details on the bug in numpy<1.13 and how it affects pandas.) Bug 2: Passing builtins to ``df.aggregate`` is ok when ``axis=0``, but gives wrong result,when ``axis=1`` (#21134). The reason for this difference is that ``df.aggregate`` defers to ``df._aggregate`` when ``axis=0,`` but defers to ``df.apply``, when ``axis=1``, and these give different result when passed funcions and the series/frame contains Nan values. This can be solved by transposing df and defering the transposed frame to its ``_aggragate`` method when ``axis=1``. The added tests have been heavily parametrized (this helped unearth the bugs above). Thet have been placed in ``series/test_apply.py`` and ``frame/test_apply``, as a lot of other tests for ser/df.aggregate were already there.
https://api.github.com/repos/pandas-dev/pandas/pulls/21123
2018-05-18T18:47:01Z
2018-07-28T23:08:21Z
null
2018-07-28T23:08:21Z
Resolution docstring
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 76849f2116123..a76ebc8000e54 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -834,7 +834,46 @@ cdef class _Timedelta(timedelta): @property def resolution(self): - """ return a string representing the lowest resolution that we have """ + """ + Return a string representing the lowest timedelta resolution. + + Each timedelta has a defined resolution that represents the lowest OR + most granular level of precision. Each level of resolution is + represented by a short string as defined below: + + Resolution: Return value + + * Days: 'D' + * Hours: 'H' + * Minutes: 'T' + * Seconds: 'S' + * Milliseconds: 'L' + * Microseconds: 'U' + * Nanoseconds: 'N' + + Returns + ------- + str + Timedelta resolution. + + Examples + -------- + >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns') + >>> td.resolution + 'N' + + >>> td = pd.Timedelta('1 days 2 min 3 us') + >>> td.resolution + 'U' + + >>> td = pd.Timedelta('2 min 3 s') + >>> td.resolution + 'S' + + >>> td = pd.Timedelta(36, unit='us') + >>> td.resolution + 'U' + """ self._ensure_components() if self._ns:
- [ ] closes #xxxx - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry ``` ################################################################################ ################### Docstring (pandas.Timedelta.resolution) ################### ################################################################################ Return a string representing the lowest (i.e. smallest) time resolution. Each timedelta has a defined resolution that represents the lowest OR most granular level of precision. Each level of resolution is represented by a short string as defined below: ============ ============ Resolution Return value ============ ============ Days ``'D'`` Hours ``'H'`` Minutes ``'T'`` Seconds ``'S'`` Milliseconds ``'L'`` Microseconds ``'U'`` Nanoseconds ``'N'`` ============ ============ Returns ------- str Time resolution. Examples -------- **Using string input** >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns') >>> td.resolution 'N' >>> td = pd.Timedelta('1 days 2 min 3 us') >>> td.resolution 'U' >>> td = pd.Timedelta('2 min 3 s') >>> td.resolution 'S' **Using integer input** >>> td = pd.Timedelta(36, unit='us') >>> td.resolution 'U' ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: See Also section not found ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21122
2018-05-18T16:16:56Z
2018-07-08T12:58:40Z
2018-07-08T12:58:40Z
2018-07-08T12:58:49Z
Idx droplevel
diff --git a/doc/source/api.rst b/doc/source/api.rst index d00e5511f1100..4faec93490fde 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1459,7 +1459,6 @@ Modifying and Computations Index.is_floating Index.is_integer Index.is_interval - Index.is_lexsorted_for_tuple Index.is_mixed Index.is_numeric Index.is_object @@ -1471,11 +1470,19 @@ Modifying and Computations Index.where Index.take Index.putmask - Index.set_names Index.unique Index.nunique Index.value_counts +Compatibility with MultiIndex +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Index.set_names + Index.is_lexsorted_for_tuple + Index.droplevel + Missing Values ~~~~~~~~~~~~~~ .. autosummary:: diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 9382d74f95295..0071f315851df 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -65,6 +65,7 @@ Indexing ^^^^^^^^ - Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`) +- :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with MultiIndex (:issue:`21115`) - I/O diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 77a67c048a48d..0986ed289e603 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4096,9 +4096,8 @@ def _maybe_casted_values(index, labels=None): if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] - if isinstance(self.index, MultiIndex): - if len(level) < self.index.nlevels: - new_index = self.index.droplevel(level) + if len(level) < self.index.nlevels: + new_index = self.index.droplevel(level) if not drop: if isinstance(self.index, MultiIndex): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index df39eb5fd8312..f79288c167356 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3158,6 +3158,60 @@ def 
_get_level_values(self, level): get_level_values = _get_level_values + def droplevel(self, level=0): + """ + Return index with requested level(s) removed. If resulting index has + only 1 level left, the result will be of Index type, not MultiIndex. + + .. versionadded:: 0.23.1 (support for non-MultiIndex) + + Parameters + ---------- + level : int, str, or list-like, default 0 + If a string is given, must be the name of a level + If list-like, elements must be names or indexes of levels. + + Returns + ------- + index : Index or MultiIndex + """ + if not isinstance(level, (tuple, list)): + level = [level] + + levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] + + if len(level) == 0: + return self + if len(level) >= self.nlevels: + raise ValueError("Cannot remove {} levels from an index with {} " + "levels: at least one level must be " + "left.".format(len(level), self.nlevels)) + # The two checks above guarantee that here self is a MultiIndex + + new_levels = list(self.levels) + new_labels = list(self.labels) + new_names = list(self.names) + + for i in levnums: + new_levels.pop(i) + new_labels.pop(i) + new_names.pop(i) + + if len(new_levels) == 1: + + # set nan if needed + mask = new_labels[0] == -1 + result = new_levels[0].take(new_labels[0]) + if mask.any(): + result = result.putmask(mask, np.nan) + + result.name = new_names[0] + return result + else: + from .multi import MultiIndex + return MultiIndex(levels=new_levels, labels=new_labels, + names=new_names, verify_integrity=False) + _index_shared_docs['get_indexer'] = """ Compute indexer and mask for new index given the current index. 
The indexer should be then used as an input to ndarray.take to align the diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fbcf06a28c1e5..ea0fab7e17648 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1761,52 +1761,6 @@ def _drop_from_level(self, labels, level): return self[mask] - def droplevel(self, level=0): - """ - Return Index with requested level removed. If MultiIndex has only 2 - levels, the result will be of Index type not MultiIndex. - - Parameters - ---------- - level : int/level name or list thereof - - Notes - ----- - Does not check if result index is unique or not - - Returns - ------- - index : Index or MultiIndex - """ - levels = level - if not isinstance(levels, (tuple, list)): - levels = [level] - - new_levels = list(self.levels) - new_labels = list(self.labels) - new_names = list(self.names) - - levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1] - - for i in levnums: - new_levels.pop(i) - new_labels.pop(i) - new_names.pop(i) - - if len(new_levels) == 1: - - # set nan if needed - mask = new_labels[0] == -1 - result = new_levels[0].take(new_labels[0]) - if mask.any(): - result = result.putmask(mask, np.nan) - - result.name = new_names[0] - return result - else: - return MultiIndex(levels=new_levels, labels=new_labels, - names=new_names, verify_integrity=False) - def swaplevel(self, i=-2, j=-1): """ Swap level i with level j. 
diff --git a/pandas/core/series.py b/pandas/core/series.py index 6d396e845219e..7947ce576dc6f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1199,9 +1199,8 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] - if isinstance(self.index, MultiIndex): - if len(level) < self.index.nlevels: - new_index = self.index.droplevel(level) + if len(level) < self.index.nlevels: + new_index = self.index.droplevel(level) if inplace: self.index = new_index @@ -3177,7 +3176,8 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): # handle ufuncs and lambdas if kwds or args and not isinstance(func, np.ufunc): - f = lambda x: func(x, *args, **kwds) + def f(x): + return func(x, *args, **kwds) else: f = func diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index f4fa547574b9e..7fc00ed8f5411 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -245,6 +245,25 @@ def test_constructor_int_dtype_nan(self): result = Index(data, dtype='float') tm.assert_index_equal(result, expected) + def test_droplevel(self, indices): + # GH 21115 + if isinstance(indices, MultiIndex): + # Tested separately in test_multi.py + return + + assert indices.droplevel([]).equals(indices) + + for level in indices.name, [indices.name]: + if isinstance(indices.name, tuple) and level is indices.name: + # GH 21121 : droplevel with tuple name + continue + with pytest.raises(ValueError): + indices.droplevel(level) + + for level in 'wrong', ['wrong']: + with pytest.raises(KeyError): + indices.droplevel(level) + @pytest.mark.parametrize("dtype", ['int64', 'uint64']) def test_constructor_int_dtype_nan_raises(self, dtype): # see gh-15187 diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 37f70090c179f..c9f6bc9151d00 100644 --- 
a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -99,7 +99,8 @@ def test_where_array_like(self): cond = [False, True] for klass in klasses: - f = lambda: i.where(klass(cond)) + def f(): + return i.where(klass(cond)) pytest.raises(NotImplementedError, f) def test_repeat(self): @@ -2078,7 +2079,7 @@ def test_droplevel_with_names(self): expected = index.droplevel(1) assert dropped.equals(expected) - def test_droplevel_multiple(self): + def test_droplevel_list(self): index = MultiIndex( levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( @@ -2089,6 +2090,16 @@ def test_droplevel_multiple(self): expected = index[:2].droplevel(2).droplevel(0) assert dropped.equals(expected) + dropped = index[:2].droplevel([]) + expected = index[:2] + assert dropped.equals(expected) + + with pytest.raises(ValueError): + index[:2].droplevel(['one', 'two', 'three']) + + with pytest.raises(KeyError): + index[:2].droplevel(['one', 'four']) + def test_drop_not_lexsorted(self): # GH 12078 @@ -2405,7 +2416,8 @@ def check(nlevels, with_nulls): # with a dup if with_nulls: - f = lambda a: np.insert(a, 1000, a[0]) + def f(a): + return np.insert(a, 1000, a[0]) labels = list(map(f, labels)) index = MultiIndex(levels=levels, labels=labels) else:
- [x] closes #21115 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Collateral change: if ``mi`` is a ``MultiIndex``, ``mi.droplevel([])`` will now return ``mi`` itself, not a copy of it. Given that these are immutable objects, seems OK to me - but if a copy is preferred, the change is trivial.
https://api.github.com/repos/pandas-dev/pandas/pulls/21116
2018-05-18T08:18:02Z
2018-05-21T23:13:49Z
2018-05-21T23:13:49Z
2018-06-29T08:41:52Z
DOC: Add linspace range behavior to the timeseries/timedeltas/interval docs
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index c81842d3d9212..ec517d3e07bdf 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -924,6 +924,55 @@ bins, with ``NaN`` representing a missing value similar to other dtypes. pd.cut([0, 3, 5, 1], bins=c.categories) + +Generating Ranges of Intervals +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If we need intervals on a regular frequency, we can use the :func:`interval_range` function +to create an ``IntervalIndex`` using various combinations of ``start``, ``end``, and ``periods``. +The default frequency for ``interval_range`` is a 1 for numeric intervals, and calendar day for +datetime-like intervals: + +.. ipython:: python + + pd.interval_range(start=0, end=5) + + pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4) + + pd.interval_range(end=pd.Timedelta('3 days'), periods=3) + +The ``freq`` parameter can used to specify non-default frequencies, and can utilize a variety +of :ref:`frequency aliases <timeseries.offset_aliases>` with datetime-like intervals: + +.. ipython:: python + + pd.interval_range(start=0, periods=5, freq=1.5) + + pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4, freq='W') + + pd.interval_range(start=pd.Timedelta('0 days'), periods=3, freq='9H') + +Additionally, the ``closed`` parameter can be used to specify which side(s) the intervals +are closed on. Intervals are closed on the right side by default. + +.. ipython:: python + + pd.interval_range(start=0, end=4, closed='both') + + pd.interval_range(start=0, end=4, closed='neither') + +.. versionadded:: 0.23.0 + +Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced +intervals from ``start`` to ``end`` inclusively, with ``periods`` number of elements +in the resulting ``IntervalIndex``: + +.. 
ipython:: python + + pd.interval_range(start=0, end=6, periods=4) + + pd.interval_range(pd.Timestamp('2018-01-01'), pd.Timestamp('2018-02-28'), periods=3) + Miscellaneous indexing FAQ -------------------------- diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index 5f3a01f0725d4..745810704f665 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -352,8 +352,8 @@ You can convert a ``Timedelta`` to an `ISO 8601 Duration`_ string with the TimedeltaIndex -------------- -To generate an index with time delta, you can use either the ``TimedeltaIndex`` or -the ``timedelta_range`` constructor. +To generate an index with time delta, you can use either the :class:`TimedeltaIndex` or +the :func:`timedelta_range` constructor. Using ``TimedeltaIndex`` you can pass string-like, ``Timedelta``, ``timedelta``, or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent missing values. @@ -363,13 +363,47 @@ or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent miss pd.TimedeltaIndex(['1 days', '1 days, 00:00:05', np.timedelta64(2,'D'), datetime.timedelta(days=2,seconds=2)]) -Similarly to ``date_range``, you can construct regular ranges of a ``TimedeltaIndex``: +Generating Ranges of Time Deltas +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Similar to :func:`date_range`, you can construct regular ranges of a ``TimedeltaIndex`` +using :func:`timedelta_range`. The default frequency for ``timedelta_range`` is +calendar day: + +.. ipython:: python + + pd.timedelta_range(start='1 days', periods=5) + +Various combinations of ``start``, ``end``, and ``periods`` can be used with +``timedelta_range``: + +.. ipython:: python + + pd.timedelta_range(start='1 days', end='5 days') + + pd.timedelta_range(end='10 days', periods=4) + +The ``freq`` parameter can passed a variety of :ref:`frequency aliases <timeseries.offset_aliases>`: .. 
ipython:: python - pd.timedelta_range(start='1 days', periods=5, freq='D') pd.timedelta_range(start='1 days', end='2 days', freq='30T') + pd.timedelta_range(start='1 days', periods=5, freq='2D5H') + + +.. versionadded:: 0.23.0 + +Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced +timedeltas from ``start`` to ``end`` inclusively, with ``periods`` number of elements +in the resulting ``TimedeltaIndex``: + +.. ipython:: python + + pd.timedelta_range('0 days', '4 days', periods=5) + + pd.timedelta_range('0 days', '4 days', periods=10) + Using the TimedeltaIndex ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 73e3e721aad71..1b0cf86995a39 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -393,6 +393,18 @@ of those specified will not be generated: pd.bdate_range(start=start, periods=20) +.. versionadded:: 0.23.0 + +Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced +dates from ``start`` to ``end`` inclusively, with ``periods`` number of elements in the +resulting ``DatetimeIndex``: + +.. ipython:: python + + pd.date_range('2018-01-01', '2018-01-05', periods=5) + + pd.date_range('2018-01-01', '2018-01-05', periods=10) + .. _timeseries.custom-freq-ranges: Custom Frequency Ranges
Follow-up to #21009 `timeseries.rst`: - Added documentation for linspace behavior of `date_range` `timedeltas.rst`: - Cleaned up `timedelta_range` documentation for the standard behavior - Added documentation for linspace behavior of `timedelta_range` `advanced.rst`: - Added documentation for the standard behavior of `interval_range` - Added documentation for linspace behavior of `interval_range`
https://api.github.com/repos/pandas-dev/pandas/pulls/21114
2018-05-18T04:43:06Z
2018-05-22T07:15:04Z
2018-05-22T07:15:04Z
2018-06-22T17:14:57Z
BUG: to_clipboard fails to format output for Excel
diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt index 987f171878d0b..1ca693755b3c6 100644 --- a/doc/source/whatsnew/v0.23.2.txt +++ b/doc/source/whatsnew/v0.23.2.txt @@ -57,6 +57,7 @@ Fixed Regressions - Bug in both :meth:`DataFrame.first_valid_index` and :meth:`Series.first_valid_index` raised for a row index having duplicate values (:issue:`21441`) - Fixed regression in unary negative operations with object dtype (:issue:`21380`) - Bug in :meth:`Timestamp.ceil` and :meth:`Timestamp.floor` when timestamp is a multiple of the rounding frequency (:issue:`21262`) +- Fixed regression in :func:`to_clipboard` that defaulted to copying dataframes with space delimited instead of tab delimited (:issue:`21104`) .. _whatsnew_0232.performance: diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index dcc221ce978b3..b3f40b3a2429c 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -1,6 +1,7 @@ """ io on the clipboard """ from pandas import compat, get_option, option_context, DataFrame -from pandas.compat import StringIO, PY2 +from pandas.compat import StringIO, PY2, PY3 +import warnings def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover @@ -32,7 +33,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover # try to decode (if needed on PY3) # Strange. linux py33 doesn't complain, win py33 does - if compat.PY3: + if PY3: try: text = compat.bytes_to_str( text, encoding=(kwargs.get('encoding') or @@ -55,11 +56,27 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover counts = {x.lstrip().count('\t') for x in lines} if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0: - sep = r'\t' + sep = '\t' + # Edge case where sep is specified to be None, return to default if sep is None and kwargs.get('delim_whitespace') is None: sep = r'\s+' + # Regex separator currently only works with python engine. 
+ # Default to python if separator is multi-character (regex) + if len(sep) > 1 and kwargs.get('engine') is None: + kwargs['engine'] = 'python' + elif len(sep) > 1 and kwargs.get('engine') == 'c': + warnings.warn('read_clipboard with regex separator does not work' + ' properly with c engine') + + # In PY2, the c table reader first encodes text with UTF-8 but Python + # table reader uses the format of the passed string. For consistency, + # encode strings for python engine so that output from python and c + # engines produce consistent results + if kwargs.get('engine') == 'python' and PY2: + text = text.encode('utf-8') + return read_table(StringIO(text), sep=sep, **kwargs) @@ -99,7 +116,7 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover if excel: try: if sep is None: - sep = r'\t' + sep = '\t' buf = StringIO() # clipboard_set (pyperclip) expects unicode obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs) @@ -108,8 +125,11 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover text = text.decode('utf-8') clipboard_set(text) return - except: - pass + except TypeError: + warnings.warn('to_clipboard in excel mode requires a single ' + 'character separator.') + elif sep is not None: + warnings.warn('to_clipboard with excel=False ignores the sep argument') if isinstance(obj, DataFrame): # str(df) has various unhelpful defaults, like truncation diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 80fddd50fc9a8..a6b331685e72a 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -88,8 +88,6 @@ def check_round_trip_frame(self, data, excel=None, sep=None, tm.assert_frame_equal(data, result, check_dtype=False) # Test that default arguments copy as tab delimited - @pytest.mark.xfail(reason='to_clipboard defaults to space delim. 
' - 'Issue in #21104, Fixed in #21111') def test_round_trip_frame(self, df): self.check_round_trip_frame(df) @@ -99,10 +97,6 @@ def test_round_trip_frame_sep(self, df, sep): self.check_round_trip_frame(df, sep=sep) # Test white space separator - @pytest.mark.xfail(reason="Fails on 'delims' df because quote escapes " - "aren't handled correctly in default c engine. Fixed " - "in #21111 by defaulting to python engine for " - "whitespace separator") def test_round_trip_frame_string(self, df): df.to_clipboard(excel=False, sep=None) result = read_clipboard() @@ -111,21 +105,17 @@ def test_round_trip_frame_string(self, df): # Two character separator is not supported in to_clipboard # Test that multi-character separators are not silently passed - @pytest.mark.xfail(reason="Not yet implemented. Fixed in #21111") def test_excel_sep_warning(self, df): with tm.assert_produces_warning(): df.to_clipboard(excel=True, sep=r'\t') # Separator is ignored when excel=False and should produce a warning - @pytest.mark.xfail(reason="Not yet implemented. Fixed in #21111") def test_copy_delim_warning(self, df): with tm.assert_produces_warning(): df.to_clipboard(excel=False, sep='\t') # Tests that the default behavior of to_clipboard is tab # delimited and excel="True" - @pytest.mark.xfail(reason="to_clipboard defaults to space delim. Issue in " - "#21104, Fixed in #21111") @pytest.mark.parametrize('sep', ['\t', None, 'default']) @pytest.mark.parametrize('excel', [True, None, 'default']) def test_clipboard_copy_tabs_default(self, sep, excel, df): @@ -139,10 +129,6 @@ def test_clipboard_copy_tabs_default(self, sep, excel, df): assert clipboard_get() == df.to_csv(sep='\t') # Tests reading of white space separated tables - @pytest.mark.xfail(reason="Fails on 'delims' df because quote escapes " - "aren't handled correctly. in default c engine. 
Fixed " - "in #21111 by defaulting to python engine for " - "whitespace separator") @pytest.mark.parametrize('sep', [None, 'default']) @pytest.mark.parametrize('excel', [False]) def test_clipboard_copy_strings(self, sep, excel, df): @@ -193,8 +179,6 @@ def test_invalid_encoding(self, df): with pytest.raises(NotImplementedError): pd.read_clipboard(encoding='ascii') - @pytest.mark.xfail(reason='to_clipboard defaults to space delim. ' - 'Issue in #21104, Fixed in #21111') @pytest.mark.parametrize('enc', ['UTF-8', 'utf-8', 'utf8']) def test_round_trip_valid_encodings(self, enc, df): self.check_round_trip_frame(df, encoding=enc)
`DataFrame.to_clipboard` has been broken for pasting to excel. Tables are copied with spaces as delimiters instead of tabs (#21104). This issue originated in https://github.com/pandas-dev/pandas/commit/e1d5a2738235fec22f3cfad4814e09e3e3786f8c#diff-3f25860d9237143c1952a1f93c3aae18R102 which I've partially reverted. By setting the delimiter to `r'\t'`, a 2 character string, obj.to_csv raised an error, but is was caught and passed silently. I reverted the separator to `'\t'`. Similar issue in `read_clipboard` also fixed. - [x] closes #21104
https://api.github.com/repos/pandas-dev/pandas/pulls/21111
2018-05-17T23:18:20Z
2018-06-29T12:22:17Z
2018-06-29T12:22:16Z
2018-07-02T15:44:24Z
PERF: Speedup in printing DataFrames of long strings
Currently, when printing DataFrames, each element of a table to be printed is first formatted in its entirety by `GenericArrayFormatter._format_strings` and then truncated by `_make_fixed_width`. When processing DataFrames that contain very long strings, there is a noticeable computation delay when calling `DataFrame.__repr__`. The bottleneck is in `_format_strings`, where each string runs several `str.replace` to format it for printing. We can eliminate wasted time by slicing any any strings longer than the maximum column width **before** pretty formatting the string, leaving enough buffer space such that formatting the string won't bring it below the maximum column width. ### Performance Demonstration Setup: ```python >>> data = pd.DataFrame([str(list(range(5000000)))]*50) >>> len(data.values[0][0]) #length of each string 3888890 >>> _ = data.__repr__() ``` Final steps goes from 0.97386 seconds to 0.0089 seconds to execute.
https://api.github.com/repos/pandas-dev/pandas/pulls/21110
2018-05-17T22:36:43Z
2018-06-19T00:04:17Z
null
2018-06-19T00:35:52Z
DOC: Add sphinx spelling extension
diff --git a/Makefile b/Makefile index c79175cd3c401..4a82566cf726e 100644 --- a/Makefile +++ b/Makefile @@ -23,3 +23,4 @@ doc: cd doc; \ python make.py clean; \ python make.py html + python make.py spellcheck diff --git a/doc/make.py b/doc/make.py index 4967f30453fd1..4d54a2415a194 100755 --- a/doc/make.py +++ b/doc/make.py @@ -224,8 +224,9 @@ def _sphinx_build(self, kind): -------- >>> DocBuilder(num_jobs=4)._sphinx_build('html') """ - if kind not in ('html', 'latex'): - raise ValueError('kind must be html or latex, not {}'.format(kind)) + if kind not in ('html', 'latex', 'spelling'): + raise ValueError('kind must be html, latex or ' + 'spelling, not {}'.format(kind)) self._run_os('sphinx-build', '-j{}'.format(self.num_jobs), @@ -304,6 +305,18 @@ def zip_html(self): '-q', *fnames) + def spellcheck(self): + """Spell check the documentation.""" + self._sphinx_build('spelling') + output_location = os.path.join('build', 'spelling', 'output.txt') + with open(output_location) as output: + lines = output.readlines() + if lines: + raise SyntaxError( + 'Found misspelled words.' + ' Check pandas/doc/build/spelling/output.txt' + ' for more details.') + def main(): cmds = [method for method in dir(DocBuilder) if not method.startswith('_')] diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index c81842d3d9212..19d745121ce17 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -342,7 +342,7 @@ As usual, **both sides** of the slicers are included as this is label indexing. columns=micolumns).sort_index().sort_index(axis=1) dfmi -Basic multi-index slicing using slices, lists, and labels. +Basic MultiIndex slicing using slices, lists, and labels. .. 
ipython:: python @@ -990,7 +990,7 @@ On the other hand, if the index is not monotonic, then both slice bounds must be KeyError: 'Cannot get right slice bound for non-unique label: 3' :meth:`Index.is_monotonic_increasing` and :meth:`Index.is_monotonic_decreasing` only check that -an index is weakly monotonic. To check for strict montonicity, you can combine one of those with +an index is weakly monotonic. To check for strict monotonicity, you can combine one of those with :meth:`Index.is_unique` .. ipython:: python diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 8d09f1fc04c1f..d4efa8a28f6c5 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -593,7 +593,7 @@ categorical columns: frame = pd.DataFrame({'a': ['Yes', 'Yes', 'No', 'No'], 'b': range(4)}) frame.describe() -This behaviour can be controlled by providing a list of types as ``include``/``exclude`` +This behavior can be controlled by providing a list of types as ``include``/``exclude`` arguments. The special value ``all`` can also be used: .. ipython:: python diff --git a/doc/source/conf.py b/doc/source/conf.py index d516e67b947ba..97081bec863b7 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -73,10 +73,14 @@ 'sphinx.ext.ifconfig', 'sphinx.ext.linkcode', 'nbsphinx', + 'sphinxcontrib.spelling' ] exclude_patterns = ['**.ipynb_checkpoints'] +spelling_word_list_filename = ['spelling_wordlist.txt', 'names_wordlist.txt'] +spelling_ignore_pypi_package_names = True + with open("index.rst") as f: index_rst_lines = f.readlines() diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index e9939250052f1..6ae93ba46fa5c 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -436,6 +436,25 @@ the documentation are also built by Travis-CI. These docs are then hosted `here <http://pandas-docs.github.io/pandas-docs-travis>`__, see also the :ref:`Continuous Integration <contributing.ci>` section. 
+Spell checking documentation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When contributing to documentation to **pandas** it's good to check if your work +contains any spelling errors. Sphinx provides an easy way to spell check documentation +and docstrings. + +Running the spell check is easy. Just navigate to your local ``pandas/doc/`` directory and run:: + + python make.py spellcheck + +The spellcheck will take a few minutes to run (between 1 to 6 minutes). Sphinx will alert you +with warnings and misspelt words - these misspelt words will be added to a file called +``output.txt`` and you can find it on your local directory ``pandas/doc/build/spelling/``. + +The Sphinx spelling extension uses an EN-US dictionary to correct words, what means that in +some cases you might need to add a word to this dictionary. You can do so by adding the word to +the bag-of-words file named ``spelling_wordlist.txt`` located in the folder ``pandas/doc/``. + .. _contributing.code: Contributing to the code base diff --git a/doc/source/contributing_docstring.rst b/doc/source/contributing_docstring.rst index f80bfd9253764..6b2ecfe66d5e2 100644 --- a/doc/source/contributing_docstring.rst +++ b/doc/source/contributing_docstring.rst @@ -103,7 +103,7 @@ left before or after the docstring. The text starts in the next line after the opening quotes. The closing quotes have their own line (meaning that they are not at the end of the last sentence). -In rare occasions reST styles like bold text or itallics will be used in +In rare occasions reST styles like bold text or italics will be used in docstrings, but is it common to have inline code, which is presented between backticks. It is considered inline code: @@ -706,7 +706,7 @@ than 5, to show the example with the default values. If doing the ``mean``, we could use something like ``[1, 2, 3]``, so it is easy to see that the value returned is the mean. 
-For more complex examples (groupping for example), avoid using data without +For more complex examples (grouping for example), avoid using data without interpretation, like a matrix of random numbers with columns A, B, C, D... And instead use a meaningful example, which makes it easier to understand the concept. Unless required by the example, use names of animals, to keep examples @@ -877,7 +877,7 @@ be tricky. Here are some attention points: the actual error only the error name is sufficient. * If there is a small part of the result that can vary (e.g. a hash in an object - represenation), you can use ``...`` to represent this part. + representation), you can use ``...`` to represent this part. If you want to show that ``s.plot()`` returns a matplotlib AxesSubplot object, this will fail the doctest :: diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 893642410af02..fdc3b38cfdebc 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -286,7 +286,7 @@ New Columns df = pd.DataFrame( {'AAA' : [1,1,1,2,2,2,3,3], 'BBB' : [2,1,3,4,5,1,2,3]}); df -Method 1 : idxmin() to get the index of the mins +Method 1 : idxmin() to get the index of the minimums .. ipython:: python @@ -307,7 +307,7 @@ MultiIndexing The :ref:`multindexing <advanced.hierarchical>` docs. -`Creating a multi-index from a labeled frame +`Creating a MultiIndex from a labeled frame <http://stackoverflow.com/questions/14916358/reshaping-dataframes-in-pandas-based-on-column-labels>`__ .. ipython:: python @@ -330,7 +330,7 @@ The :ref:`multindexing <advanced.hierarchical>` docs. Arithmetic ********** -`Performing arithmetic with a multi-index that needs broadcasting +`Performing arithmetic with a MultiIndex that needs broadcasting <http://stackoverflow.com/questions/19501510/divide-entire-pandas-multiindex-dataframe-by-dataframe-variable/19502176#19502176>`__ .. 
ipython:: python @@ -342,7 +342,7 @@ Arithmetic Slicing ******* -`Slicing a multi-index with xs +`Slicing a MultiIndex with xs <http://stackoverflow.com/questions/12590131/how-to-slice-multindex-columns-in-pandas-dataframes>`__ .. ipython:: python @@ -363,7 +363,7 @@ To take the cross section of the 1st level and 1st axis the index: df.xs('six',level=1,axis=0) -`Slicing a multi-index with xs, method #2 +`Slicing a MultiIndex with xs, method #2 <http://stackoverflow.com/questions/14964493/multiindex-based-indexing-in-pandas>`__ .. ipython:: python @@ -386,13 +386,13 @@ To take the cross section of the 1st level and 1st axis the index: df.loc[(All,'Math'),('Exams')] df.loc[(All,'Math'),(All,'II')] -`Setting portions of a multi-index with xs +`Setting portions of a MultiIndex with xs <http://stackoverflow.com/questions/19319432/pandas-selecting-a-lower-level-in-a-dataframe-to-do-a-ffill>`__ Sorting ******* -`Sort by specific column or an ordered list of columns, with a multi-index +`Sort by specific column or an ordered list of columns, with a MultiIndex <http://stackoverflow.com/questions/14733871/mutli-index-sorting-in-pandas>`__ .. ipython:: python @@ -664,7 +664,7 @@ The :ref:`Pivot <reshaping.pivot>` docs. `Plot pandas DataFrame with year over year data <http://stackoverflow.com/questions/30379789/plot-pandas-data-frame-with-year-over-year-data>`__ -To create year and month crosstabulation: +To create year and month cross tabulation: .. ipython:: python @@ -677,7 +677,7 @@ To create year and month crosstabulation: Apply ***** -`Rolling Apply to Organize - Turning embedded lists into a multi-index frame +`Rolling Apply to Organize - Turning embedded lists into a MultiIndex frame <http://stackoverflow.com/questions/17349981/converting-pandas-dataframe-with-categorical-values-into-binary-values>`__ .. 
ipython:: python @@ -1029,8 +1029,8 @@ Skip row between header and data 01.01.1990 05:00;21;11;12;13 """ -Option 1: pass rows explicitly to skiprows -"""""""""""""""""""""""""""""""""""""""""" +Option 1: pass rows explicitly to skip rows +""""""""""""""""""""""""""""""""""""""""""" .. ipython:: python diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index ca6cefac9e842..b5b56fc6815c9 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -1014,7 +1014,7 @@ Deprecate Panel Over the last few years, pandas has increased in both breadth and depth, with new features, datatype support, and manipulation routines. As a result, supporting efficient indexing and functional routines for ``Series``, ``DataFrame`` and ``Panel`` has contributed to an increasingly fragmented and -difficult-to-understand codebase. +difficult-to-understand code base. The 3-D structure of a ``Panel`` is much less common for many types of data analysis, than the 1-D of the ``Series`` or the 2-D of the ``DataFrame``. Going forward it makes sense for @@ -1023,7 +1023,7 @@ pandas to focus on these areas exclusively. Oftentimes, one can simply use a MultiIndex ``DataFrame`` for easily working with higher dimensional data. In addition, the ``xarray`` package was built from the ground up, specifically in order to -support the multi-dimensional analysis that is one of ``Panel`` s main usecases. +support the multi-dimensional analysis that is one of ``Panel`` s main use cases. `Here is a link to the xarray panel-transition documentation <http://xarray.pydata.org/en/stable/pandas.html#panel-transition>`__. .. 
ipython:: python diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 30cdb06b28487..8631ec7878af5 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -184,8 +184,8 @@ and metadata disseminated in `SDMX <http://www.sdmx.org>`_ 2.1, an ISO-standard widely used by institutions such as statistics offices, central banks, and international organisations. pandaSDMX can expose datasets and related -structural metadata including dataflows, code-lists, -and datastructure definitions as pandas Series +structural metadata including data flows, code-lists, +and data structure definitions as pandas Series or multi-indexed DataFrames. `fredapi <https://github.com/mortada/fredapi>`__ @@ -260,7 +260,7 @@ Data validation `Engarde <http://engarde.readthedocs.io/en/latest/>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Engarde is a lightweight library used to explicitly state your assumptions abour your datasets +Engarde is a lightweight library used to explicitly state your assumptions about your datasets and check that they're *actually* true. .. _ecosystem.extensions: diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index b786b1d0c134a..979d025111df1 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -32,7 +32,7 @@ Cython (Writing C extensions for pandas) ---------------------------------------- For many use cases writing pandas in pure Python and NumPy is sufficient. In some -computationally heavy applications however, it can be possible to achieve sizeable +computationally heavy applications however, it can be possible to achieve sizable speed-ups by offloading work to `cython <http://cython.org/>`__. This tutorial assumes you have refactored as much as possible in Python, for example @@ -806,7 +806,7 @@ truncate any strings that are more than 60 characters in length. 
Second, we can't pass ``object`` arrays to ``numexpr`` thus string comparisons must be evaluated in Python space. -The upshot is that this *only* applies to object-dtype'd expressions. So, if +The upshot is that this *only* applies to object-dtype expressions. So, if you have an expression--for example .. ipython:: python diff --git a/doc/source/extending.rst b/doc/source/extending.rst index f665b219a7bd1..431c69bc0b6b5 100644 --- a/doc/source/extending.rst +++ b/doc/source/extending.rst @@ -167,7 +167,7 @@ you can retain subclasses through ``pandas`` data manipulations. There are 3 constructor properties to be defined: -- ``_constructor``: Used when a manipulation result has the same dimesions as the original. +- ``_constructor``: Used when a manipulation result has the same dimensions as the original. - ``_constructor_sliced``: Used when a manipulation result has one lower dimension(s) as the original, such as ``DataFrame`` single columns slicing. - ``_constructor_expanddim``: Used when a manipulation result has one higher dimension as the original, such as ``Series.to_frame()`` and ``DataFrame.to_panel()``. diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index da13a34cccfea..1c4c3f93726a9 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -994,7 +994,7 @@ is only interesting over one column (here ``colname``), it may be filtered Handling of (un)observed Categorical values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When using a ``Categorical`` grouper (as a single grouper, or as part of multipler groupers), the ``observed`` keyword +When using a ``Categorical`` grouper (as a single grouper, or as part of multiple groupers), the ``observed`` keyword controls whether to return a cartesian product of all possible groupers values (``observed=False``) or only those that are observed groupers (``observed=True``). 
@@ -1010,7 +1010,7 @@ Show only the observed values: pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'], categories=['a', 'b']), observed=True).count() -The returned dtype of the grouped will *always* include *all* of the catergories that were grouped. +The returned dtype of the grouped will *always* include *all* of the categories that were grouped. .. ipython:: python diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index e834efd1cb6d1..2b9fcf874ef22 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -700,7 +700,7 @@ Current Behavior Reindexing ~~~~~~~~~~ -The idiomatic way to achieve selecting potentially not-found elmenents is via ``.reindex()``. See also the section on :ref:`reindexing <basics.reindexing>`. +The idiomatic way to achieve selecting potentially not-found elements is via ``.reindex()``. See also the section on :ref:`reindexing <basics.reindexing>`. .. ipython:: python diff --git a/doc/source/install.rst b/doc/source/install.rst index 6054be112f52c..e655136904920 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -31,7 +31,7 @@ PyPI and through conda. Starting **January 1, 2019**, all releases will be Python 3 only. If there are people interested in continued support for Python 2.7 past December -31, 2018 (either backporting bugfixes or funding) please reach out to the +31, 2018 (either backporting bug fixes or funding) please reach out to the maintainers on the issue tracker. For more information, see the `Python 3 statement`_ and the `Porting to Python 3 guide`_. @@ -199,7 +199,7 @@ Running the test suite ---------------------- pandas is equipped with an exhaustive set of unit tests, covering about 97% of -the codebase as of this writing. To run it on your machine to verify that +the code base as of this writing. 
To run it on your machine to verify that everything is working (and that you have all of the dependencies, soft and hard, installed), make sure you have `pytest <http://doc.pytest.org/en/latest/>`__ and run: diff --git a/doc/source/internals.rst b/doc/source/internals.rst index b120e3a98db7f..caf5790fb24c6 100644 --- a/doc/source/internals.rst +++ b/doc/source/internals.rst @@ -41,7 +41,7 @@ There are functions that make the creation of a regular index easy: - ``date_range``: fixed frequency date range generated from a time rule or DateOffset. An ndarray of Python datetime objects - ``period_range``: fixed frequency date range generated from a time rule or - DateOffset. An ndarray of ``Period`` objects, representing Timespans + DateOffset. An ndarray of ``Period`` objects, representing timespans The motivation for having an ``Index`` class in the first place was to enable different implementations of indexing. This means that it's possible for you, diff --git a/doc/source/io.rst b/doc/source/io.rst index aa2484b0cb5c3..7bd56d52b3492 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -116,7 +116,7 @@ header : int or list of ints, default ``'infer'`` existing names. The header can be a list of ints that specify row locations - for a multi-index on the columns e.g. ``[0,1,3]``. Intervening rows + for a MultiIndex on the columns e.g. ``[0,1,3]``. Intervening rows that are not specified will be skipped (e.g. 2 in this example is skipped). Note that this parameter ignores commented lines and empty lines if ``skip_blank_lines=True``, so header=0 denotes the first @@ -503,7 +503,7 @@ This matches the behavior of :meth:`Categorical.set_categories`. converted using the :func:`to_numeric` function, or as appropriate, another converter such as :func:`to_datetime`. 
- When ``dtype`` is a ``CategoricalDtype`` with homogenous ``categories`` ( + When ``dtype`` is a ``CategoricalDtype`` with homogeneous ``categories`` ( all numeric, all datetimes, etc.), the conversion is done automatically. .. ipython:: python @@ -554,7 +554,7 @@ If the header is in a row other than the first, pass the row number to Default behavior is to infer the column names: if no names are passed the behavior is identical to ``header=0`` and column names - are inferred from the first nonblank line of the file, if column + are inferred from the first non-blank line of the file, if column names are passed explicitly then the behavior is identical to ``header=None``. @@ -868,7 +868,7 @@ data columns: df .. note:: - If a column or index contains an unparseable date, the entire column or + If a column or index contains an unparsable date, the entire column or index will be returned unaltered as an object data type. For non-standard datetime parsing, use :func:`to_datetime` after ``pd.read_csv``. @@ -1644,7 +1644,7 @@ over the string representation of the object. All arguments are optional: argument and returns a formatted string; to be applied to floats in the ``DataFrame``. - ``sparsify`` default True, set to False for a ``DataFrame`` with a hierarchical - index to print every multiindex key at each row. + index to print every MultiIndex key at each row. - ``index_names`` default True, will print the names of the indices - ``index`` default True, will print the index (ie, row labels) - ``header`` default True, will print the column labels @@ -2178,7 +2178,7 @@ A few notes on the generated table schema: - The ``schema`` object contains a ``pandas_version`` field. This contains the version of pandas' dialect of the schema, and will be incremented with each revision. -- All dates are converted to UTC when serializing. Even timezone naïve values, +- All dates are converted to UTC when serializing. 
Even timezone naive values, which are treated as UTC with an offset of 0. .. ipython:: python @@ -2245,7 +2245,7 @@ A few notes on the generated table schema: .. versionadded:: 0.23.0 ``read_json`` also accepts ``orient='table'`` as an argument. This allows for -the preserveration of metadata such as dtypes and index names in a +the preservation of metadata such as dtypes and index names in a round-trippable manner. .. ipython:: python @@ -2356,7 +2356,7 @@ Read a URL and match a table that contains specific text: Specify a header row (by default ``<th>`` or ``<td>`` elements located within a ``<thead>`` are used to form the column index, if multiple rows are contained within -``<thead>`` then a multiindex is created); if specified, the header row is taken +``<thead>`` then a multi-index is created); if specified, the header row is taken from the data minus the parsed header elements (``<th>`` elements). .. code-block:: python @@ -3141,7 +3141,7 @@ any pickled pandas object (or any other pickled object) from file: .. warning:: - Several internal refactorings have been done while still preserving + Several internal refactoring have been done while still preserving compatibility with pickles created with older versions of pandas. However, for such cases, pickled ``DataFrames``, ``Series`` etc, must be read with ``pd.read_pickle``, rather than ``pickle.load``. @@ -4721,7 +4721,7 @@ writes ``data`` to the database in batches of 1000 rows at a time: .. note:: - The function :func:`~pandas.DataFrame.to_sql` will perform a multivalue + The function :func:`~pandas.DataFrame.to_sql` will perform a multi-value insert if the engine dialect ``supports_multivalues_insert``. This will greatly speed up the insert in some cases. 
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 1161656731f88..0de6b871712a3 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -1310,7 +1310,7 @@ For this, use the :meth:`~DataFrame.combine_first` method: Note that this method only takes values from the right ``DataFrame`` if they are missing in the left ``DataFrame``. A related method, :meth:`~DataFrame.update`, -alters non-NA values inplace: +alters non-NA values in place: .. ipython:: python :suppress: diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index 3950e4c80749b..e4b5578af15f0 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -105,7 +105,7 @@ Datetimes For datetime64[ns] types, ``NaT`` represents missing values. This is a pseudo-native sentinel value that can be represented by NumPy in a singular dtype (datetime64[ns]). -pandas objects provide intercompatibility between ``NaT`` and ``NaN``. +pandas objects provide compatibility between ``NaT`` and ``NaN``. .. ipython:: python @@ -349,7 +349,7 @@ Interpolation The ``limit_area`` keyword argument was added. Both Series and DataFrame objects have :meth:`~DataFrame.interpolate` -that, by default, performs linear interpolation at missing datapoints. +that, by default, performs linear interpolation at missing data points. .. 
ipython:: python :suppress: diff --git a/doc/source/names_wordlist.txt b/doc/source/names_wordlist.txt new file mode 100644 index 0000000000000..032883b7febf6 --- /dev/null +++ b/doc/source/names_wordlist.txt @@ -0,0 +1,1652 @@ +Critchley +Villanova +del +Hohmann +Rychyk +Buchkovsky +Lenail +Schade +datetimeindex +Aly +Sivji +Költringer +Bui +András +Novoszáth +Anh +Anil +Pallekonda +Pitrou +Linde +Quinonez +Varshokar +Artem +Bogachev +Avi +Azeez +Oluwafemi +Auffarth +Thiel +Bhavesh +Poddar +Haffner +Naul +Guinta +Moreira +García +Márquez +Cheuk +Chitrank +Dixit +Catalfo +Mazzullo +Chwala +Cihan +Ceyhan +Brunner +Riemenschneider +Dixey +Garrido +Sakuma +Hirschfeld +Adrián +Cañones +Castellano +Arcos +Hoese +Stansby +Kamau +Niederhut +Dror +Atariah +Chea +Kisslinger +Retkowski +Sar +Maeztu +Gianpaolo +Macario +Giftlin +Rajaiah +Olimpio +Gjelt +Inggs +Grzegorz +Konefał +Guilherme +Beltramini +Pitkeathly +Mashkoor +Ferchland +Haochen +Hissashi +Sharaf +Ignasi +Fosch +Alves +Shelvinskyi +Imanflow +Ingolf +Saeta +Pérez +Koevska +Jakub +Nowacki +Werkmann +Zoutkamp +Bandlow +Jaume +Bonet +Alammar +Reback +Jing +Qiang +Goh +Miralles +Nothman +Joeun +Metz +Mease +Schulze +Jongwony +Jordi +Contestí +Joris +Bossche +José +Fonseca +Jovixe +Jörg +Döpfert +Ittoku +Surta +Kuhl +Krzysztof +Chomski +Ksenia +Ksenia +Bobrova +Kunal +Gosar +Kerstein +Laksh +Arora +Geffert +Licht +Takeuchi +Liudmila +Villalba +Manan +Singh +Manraj +Singh +Hemken +Bibiloni +Corchero +Woodbridge +Journois +Gallo +Heikkilä +Braymer +Maybeno +Rocklin +Roeschke +Bussonnier +Mikhaylov +Veksler +Roos +Maximiliano +Greco +Penkov +Röttger +Selik +Waskom +Mie +Kutzma +Mitar +Negus +Münst +Mortada +Mehyar +Braithwaite +Chmura +Karagiannakis +Nipun +Sadvilkar +Martensen +Noémi +Éltető +Bilodeau +Ondrej +Kokes +Onno +Ganssle +Mannino +Reidy +Oliveira +Hoffmann +Ngo +Battiston +Pranav +Suri +Priyanka +Ojha +Pulkit +Maloo +Magliocchetti +Ridhwan +Luthra +Kiplang'at +Rohan +Pandit +Rok +Mihevc +Rouz +Azari +Ryszard 
+Kaleta +Samir +Musali +Sinayoko +Sangwoong +Yoon +Sharad +Vijalapuram +Shubham +Chaudhary +Sietse +Brouwer +Delprete +Cianciulli +Childs +Stijn +Hoey +Talitha +Pumar +Tarbo +Fukazawa +Petrou +Caswell +Hoffmann +Swast +Augspurger +Tulio +Casagrande +Tushar +Tushar +Mittal +Upkar +Lidder +Vinícius +Figueiredo +Vipin +WBare +Wenhuan +Ayd +Xbar +Yaroslav +Halchenko +Yee +Mey +Yeongseon +Choe +Yian +Yimeng +Zhang +Zihao +Zhao +adatasetaday +akielbowicz +akosel +alinde +amuta +bolkedebruin +cbertinato +cgohlke +charlie +chris +csfarkas +dajcs +deflatSOCO +derestle +htwg +discort +dmanikowski +donK +elrubio +fivemok +fjdiod +fjetter +froessler +gabrielclow +gfyoung +ghasemnaddaf +vetinari +himanshu +awasthi +ignamv +jayfoad +jazzmuesli +jbrockmendel +jjames +joaoavf +joders +jschendel +juan +huguet +luzpaz +mdeboc +miguelmorin +miker +miquelcamprodon +orereta +ottiP +peterpanmj +rafarui +raph +readyready +rmihael +samghelms +scriptomation +sfoo +stefansimik +stonebig +tmnhat +tomneep +tv +verakai +xpvpc +zhanghui +API +Mazzullo +Riemenschneider +Hirschfeld +Stansby +Dror +Atariah +Kisslinger +Ingolf +Werkmann +Reback +Joris +Bossche +Jörg +Döpfert +Kuhl +Krzysztof +Chomski +Licht +Takeuchi +Manraj +Singh +Braymer +Waskom +Mie +Hoffmann +Sietse +Brouwer +Swast +Augspurger +Ayd +Yee +Mey +bolkedebruin +cgohlke +derestle +htwg +fjdiod +gabrielclow +gfyoung +ghasemnaddaf +jbrockmendel +jschendel +miker +pypy +Gleave +Liaw +Velasco +Yee +Marchenko +Amol +Winkler +亮 +André +Jonasson +Sweger +Berkay +Haffner +Tu +Chankey +Pathak +Billington +Filo +Gorgolewski +Mazzullo +Prinoth +Stade +Schuldt +Moehl +Himmelstein +Willmer +Niederhut +Wieser +Fredriksen +Kint +Giftlin +Giftlin +Rajaiah +Guilherme +Beltramini +Guillem +Borrell +Hanmin +Qin +Makait +Hussain +Tamboli +Miholic +Novotný +Helie +Schiratti +Deschenes +Knupp +Reback +Tratner +Nothman +Crall +Mease +Helmus +Joris +Bossche +Bochi +Kuhlmann +Brabandere +Keeton +Keiron +Pizzey +Kernc +Licht +Takeuchi +Kushner +Jelloul 
+Makarov +Malgorzata +Turzanska +Sy +Roeschke +Picus +Mehmet +Akmanalp +Gasvoda +Penkov +Eubank +Shteynbuk +Tillmann +Pankaj +Pandey +Luo +O'Melveny +Reidy +Quackenbush +Yanovich +Haessig +Battiston +Pradyumna +Reddy +Chinthala +Prasanjit +Prakash +Sangwoong +Yoon +Sudeep +Telt +Caswell +Swast +Augspurger +Tuan +Utkarsh +Upadhyay +Vivek +Aiyong +WBare +Yi +Liu +Yosuke +Nakabayashi +aaron +abarber +gh +aernlund +agustín +méndez +andymaheshw +aviolov +bpraggastis +cbertinato +cclauss +chernrick +chris +dkamm +dwkenefick +faic +fding +gfyoung +guygoldberg +hhuuggoo +huashuai +ian +iulia +jaredsnyder +jbrockmendel +jdeschenes +jebob +jschendel +keitakurita +kernc +kiwirob +kjford +linebp +lloydkirk +louispotok +majiang +manikbhandari +matthiashuschle +mattip +maxwasserman +mjlove +nmartensen +parchd +philipphanemann +rdk +reidy +ri +ruiann +rvernica +weigand +scotthavard +skwbc +tobycheese +tsdlovell +ysau +zzgao +cov +abaldenko +adrian +stepien +Saxena +Akash +Tandon +Aleksey +Bilogur +alexandercbooth +Amol +Kahat +Winkler +Kittredge +Anthonios +Partheniou +Arco +Ashish +Singal +atbd +bastewart +Baurzhan +Muftakhidinov +Kandel +bmagnusson +carlosdanielcsantos +Souza +chaimdemulder +chris +Aycock +Gohlke +Paulik +Warth +Brunner +Himmelstein +Willmer +Krych +dickreuter +Dimitris +Spathis +discort +Dmitry +Suria +Wijaya +Stanczak +dr +leo +dubourg +dwkenefick +Andrade +Ennemoser +Francesc +Alted +Fumito +Hamamura +funnycrab +gfyoung +Ferroni +goldenbull +Jeffries +Guilherme +Beltramini +Guilherme +Samora +Hao +Harshit +Patni +Ilya +Schurov +Iván +Vallés +Pérez +Leng +Jaehoon +Hwang +Goppert +Santucci +Reback +Crist +Jevnik +Nothman +Zwinck +jojomdt +Whitmore +Mease +Mease +Joost +Kranendonk +Joris +Bossche +Bradt +Santander +Julien +Marrec +Solinsky +Kacawi +Kamal +Kamalaldin +Shedden +Kernc +Keshav +Ramaswamy +Ren +linebp +Pedersen +Cestaro +Scarabello +Lukasz +paramstyle +Lababidi +Unserialized +manu +manuels +Roeschke +mattip +Picus +Roeschke +maxalbert +Roos 
+mcocdawc +Lamparski +Michiel +Mikolaj +Chwalisz +Miroslav +Šedivý +Mykola +Golubyev +Rud +Halen +Chmura +nuffe +Pankaj +Pandey +paul +mannino +Pawel +Kordek +pbreach +Csizsek +Petio +Petrov +Ruffwind +Battiston +Chromiec +Prasanjit +Prakash +Forgione +Rouz +Azari +Sahil +Dua +sakkemo +Sami +Salonen +Sarma +Tangirala +scls +Gsänger +Sébastien +Menten +Heide +Shyam +Saladi +sinhrks +Sinhrks +Rauch +stijnvanhoey +Adiseshan +themrmax +Thiago +Serafim +Thoralf +Thrasibule +Gustafsson +Augspurger +tomrod +Shen +tzinckgraf +Uwe +wandersoncferreira +watercrossing +wcwagner +Wiktor +Tomczak +xgdgsc +Yaroslav +Halchenko +Yimeng +Zhang +yui +knk +Saxena +Kandel +Aycock +Himmelstein +Willmer +gfyoung +hesham +shabana +Reback +Jevnik +Joris +Bossche +Santander +Shedden +Keshav +Ramaswamy +Scarabello +Picus +Roeschke +Roos +Mykola +Golubyev +Halen +Pawel +Kordek +Battiston +sinhrks +Adiseshan +Augspurger +wandersoncferreira +Yaroslav +Halchenko +Chainz +Anthonios +Partheniou +Arash +Rouhani +Kandel +chris +Warth +Krych +dubourg +gfyoung +Iván +Vallés +Pérez +Reback +Jevnik +Mease +Joris +Bossche +Keshav +Ramaswamy +Ren +mattrijk +paul +mannino +Chromiec +Sinhrks +Thiago +Serafim +adneu +agraboso +Alekseyev +Vig +Riddell +Amol +Amol +Agrawal +Anthonios +Partheniou +babakkeyvani +Kandel +Baxley +Camilo +Cota +chris +Grinolds +Hudon +Aycock +Warth +cmazzullo +cr +Siladji +Drewrey +Lupton +dsm +Blancas +Marsden +Marczinowski +O'Donovan +Gábor +Lipták +Geraint +gfyoung +Ferroni +Haleemur +harshul +Hassan +Shamim +iamsimha +Iulius +Nazarov +jackieleng +Reback +Crist +Jevnik +Liekezer +Zwinck +Erenrich +Joris +Bossche +Howes +Brandys +Kamil +Sindi +Ka +Wo +Shedden +Kernc +Brucher +Roos +Scherer +Mortada +Mehyar +mpuels +Haseeb +Tariq +Bonnotte +Virtanen +Mestemaker +Pawel +Kordek +Battiston +pijucha +Jucha +priyankjain +Nimmi +Gieseke +Keyes +Sahil +Dua +Sanjiv +Lobo +Sašo +Stanovnik +Heide +sinhrks +Sinhrks +Kappel +Choi +Sudarshan +Konge +Caswell +Augspurger +Uwe +Hoffmann +wcwagner 
+Xiang +Zhang +Yadunandan +Yaroslav +Halchenko +YG +Riku +Yuichiro +Kaneko +yui +knk +zhangjinjie +znmean +颜发才 +Yan +Facai +Fiore +Gartland +Bastiaan +Benoît +Vinot +Fustin +Freitas +Ter +Livschitz +Gábor +Lipták +Hassan +Kibirige +Iblis +Saeta +Pérez +Wolosonovich +Reback +Jevnik +Joris +Bossche +Storck +Ka +Wo +Shedden +Kieran +O'Mahony +Lababidi +Maoyuan +Liu +Wittmann +MaxU +Roos +Droettboom +Eubank +Bonnotte +Virtanen +Battiston +Prabhjot +Singh +Augspurger +Aiyong +Winand +Xbar +Yan +Facai +adneu +ajenkins +cargometrics +behzad +nouri +chinskiy +gfyoung +jeps +jonaslb +kotrfa +nileracecrew +onesandzeroes +sinhrks +tsdlovell +Alekseyev +Rosenfeld +Anthonios +Partheniou +Sipos +Carroux +Aycock +Scanlin +Da +Dorozhko +O'Donovan +Cleary +Gianluca +Jeffries +Horel +Schwabacher +Deschenes +Reback +Jevnik +Fremlin +Hoersch +Joris +Bossche +Joris +Vankerschaver +Ka +Wo +Keming +Zhang +Shedden +Farrugia +Lurie +Roos +Mayank +Asthana +Mortada +Mehyar +Moussa +Taifi +Navreet +Bonnotte +Reiners +Gura +Battiston +Carnevale +Rinoc +Rishipuri +Sangmin +Lasley +Sereger +Seabold +Thierry +Moisan +Caswell +Augspurger +Hauck +Varun +Yoong +Kang +Lim +Yoshiki +Vázquez +Baeza +Joong +Younggun +Yuval +Langer +argunov +behzad +nouri +boombard +brian +pantano +chromy +daniel +dgram +gfyoung +hcontrast +jfoo +kaustuv +deolal +llllllllll +ranarag +rockg +scls +seales +sinhrks +srib +surveymedia +tworec +Drozd +Anthonios +Partheniou +Berendt +Piersall +Hamed +Saljooghinejad +Iblis +Deschenes +Reback +Callin +Joris +Bossche +Ka +Wo +Loïc +Séguin +Luo +Yicheng +Magnus +Jöud +Leonhardt +Roos +Bonnotte +Pastafarianist +Chong +Schaf +Philipp +deCarvalho +Khomenko +Rémy +Léone +Thierry +Moisan +Augspurger +Varun +Hoffmann +Winterflower +Younggun +ajcr +azuranski +behzad +nouri +cel +emilydolson +hironow +lexual +llllllllll +rockg +silentquasar +sinhrks +taeold +unparseable +Rothberg +Bedini +Rosenfeld +Anthonios +Partheniou +Artemy +Kolchinsky +Willers +Gohlke +Clearfield +Ringwalt +Cottrell 
+Gagne +Schettino +Panfilov +Araujo +Gianluca +Poulin +Nisar +Henriksen +Hoegen +Jaidev +Deshpande +Swails +Reback +Buyl +Joris +Bossche +Joris +Vankerschaver +Julien +Danjou +Ka +Wo +Kehoe +Jordahl +Shedden +Buitinck +Gambogi +Savoie +Roos +D'Agostino +Mortada +Mehyar +Eubank +Nipun +Batra +Ondřej +Čertík +Pratap +Vardhan +Rafal +Skolasinski +Rinoc +Gieseke +Safia +Abdalla +Saumitra +Shahapure +Pölsterl +Rubbert +Sinhrks +Siu +Kwan +Seabold +Carrucciu +Hoyer +Pascoe +Santegoeds +Grainger +Tjerk +Santegoeds +Augspurger +Winterflower +Yaroslav +Halchenko +agijsberts +ajcr +behzad +nouri +cel +cyrusmaher +davidovitch +ganego +jreback +juricast +larvian +maximilianr +msund +rekcahpassyla +robertzk +scls +seth +sinhrks +springcoil +terrytangyuan +tzinckgraf +Rosenfeld +Artemy +Kolchinsky +Willers +Christer +der +Meeren +Hudon +Lasiman +Brundu +Gaëtan +Menten +Hiebert +Reback +Joris +Bossche +Ka +Wo +Mortada +Mehyar +Grainger +Ajamian +Augspurger +Yoshiki +Vázquez +Baeza +Younggun +austinc +behzad +nouri +jreback +lexual +rekcahpassyla +scls +sinhrks +Artemy +Kolchinsky +Gilmer +Grinolds +Birken +Hirschfeld +Dunné +Hatem +Nassrat +Sperr +Herter +Blackburne +Reback +Crist +Abernot +Joris +Bossche +Shedden +Razoumov +Riel +Mortada +Mehyar +Eubank +Grisel +Battiston +Hyunjin +Zhang +Hoyer +Tiago +Antao +Ajamian +Augspurger +Tomaz +Berisa +Shirgur +Filimonov +Hogman +Yasin +Younggun +behzad +nouri +dsm +floydsoft +gfr +jnmclarty +jreback +ksanghai +lucas +mschmohl +ptype +rockg +scls +sinhrks +Toth +Amici +Artemy +Kolchinsky +Ashwini +Chaudhary +Letson +Chau +Hoang +Christer +der +Meeren +Cottrell +Ehsan +Azarnasab +Torcasso +Sexauer +Reback +Joris +Bossche +Joschka +zur +Jacobsmühlen +Bochi +Junya +Hayashi +Shedden +Kieran +O'Mahony +Kodi +Arfer +Airas +Mortada +Mehyar +Lasley +Lasley +Pascual +Seabold +Hoyer +Grainger +Augspurger +Filimonov +Vyomkesh +Tripathi +Holmgren +Yulong +behzad +nouri +bertrandhaut +bjonen +cel +clham +hsperr +ischwabacher +jnmclarty +josham 
+jreback +omtinez +roch +sinhrks +unutbu +Angelos +Evripiotis +Artemy +Kolchinsky +Pointet +Jacobowski +Charalampos +Papaloizou +Warth +Zanini +Francesc +Kleynhans +Reback +Tratner +Joris +Bossche +Suggit +Lasley +Hoyer +Sylvain +Corlay +Grainger +Tiago +Antao +Hauck +Chaves +Salgado +Bhandoh +Aiyong +Holmgren +behzad +nouri +broessli +charalampos +papaloizou +immerrr +jnmclarty +jreback +mgilbert +onesandzeroes +peadarcoyle +rockg +seth +sinhrks +unutbu +wavedatalab +Åsmund +Hjulstad +Rosenfeld +Sipos +Artemy +Kolchinsky +Letson +Horel +Reback +Joris +Bossche +Sanghee +Hoyer +Aiyong +behzad +nouri +immerrr +jnmclarty +jreback +pallav +fdsi +unutbu +Greenhall +Artemy +Kolchinsky +behzad +nouri +Sauer +benjamin +Thyreau +bjonen +Stoafer +dlovell +dsm +Herrero +Hsiaoming +Huan +hunterowens +Hyungtae +immerrr +Slavitt +ischwabacher +Schaer +Tratner +Farnham +jmorris +jnmclarty +Bradish +Joerg +Rittinger +Joris +Bossche +jreback +klonuo +lexual +mcjcode +Schatzow +Mortada +Mehyar +mtrbean +Typanski +onesandzeroes +Masurel +Battiston +rockg +Petchler +seth +Shahul +Hameed +Shashank +Agarwal +sinhrks +someben +stahlous +stas +sl +Hoyer +thatneat +alcorn +Augspurger +unutbu +Yevgeniy +Grechka +Yoshiki +VÃ +zquez +Baeza +zachcp +Rosenfeld +Quistorff +Wignall +bwignall +clham +Waeber +Bew +dsm +helger +immerrr +Schaer +jaimefrio +Reaver +Joris +Bossche +jreback +Julien +Danjou +lexual +Wittmann +Mortada +Mehyar +onesandzeroes +rockg +sanguineturtle +Schaer +seth +sinhrks +Hoyer +Kluyver +yelite +hexbin +Acanthostega +agijsberts +akittredge +Gaudio +Rothberg +Rosenfeld +ankostis +anomrake +Mazières +anton +bashtage +Sauer +benjamin +Buran +bwignall +cgohlke +chebee +clham +Birken +danielballan +Waeber +Drapala +Gouthaman +Balaraman +Poulin +hshimizu +hugo +immerrr +ischwabacher +Schaer +jaimefrio +Sexauer +Reback +Tratner +Reaver +Joris +Bossche +jreback +jsexauer +Júlio +kdiether +Jordahl +Wittmann +Grender +Gruen +michaelws +mikebailey +Nipun +Batra +ojdo +onesandzeroes 
+phaebz +Battiston +Carnevale +ribonoous +Gibboni +rockg +sinhrks +Seabold +Hoyer +Cera +Augspurger +unutbu +westurner +Yaroslav +Halchenko +lexual +danbirken +travis +Billington +Cobzarenco +Gamboa +Cavazos +Gaudecker +Gerigk +Yaroslav +Halchenko +sharey +Vytautas +Jancauskas +Hammerbacher +Hilboll +Luc +Kesters +JanSchulz +Negusse +Wouter +Overmeire +Reeson +Aman +Thakral +Uga +Vandenbussche +Pinxing +astype +Buglet +Beltrame +Hilboll +Jev +Kuznetsov +Wouter +Overmeire +Reyfman +Joon +Ro +Uga +Vandenbussche +setupegg +Hammerbacher +Jev +Kuznetsov +Wouter +Overmeire +Aman +Thakral +Uga +Vandenbussche +carljv +rsamson +newaxis +Fortunov +Aman +Thakral +Beltrame +Wouter +Overmeire +rsamson +Laserson +Pentreath +Joon +Ro +Uga +Fortunov +Berka +Vandenbussche +krogh +akima +BPoly +isna +kurt diff --git a/doc/source/options.rst b/doc/source/options.rst index 48247eb48baaf..697cc0682e39a 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -149,7 +149,7 @@ More information can be found in the `ipython documentation Frequently Used Options ----------------------- -The following is a walkthrough of the more frequently used display options. +The following is a walk-through of the more frequently used display options. ``display.max_rows`` and ``display.max_columns`` sets the maximum number of rows and columns displayed when a frame is pretty-printed. Truncated diff --git a/doc/source/release.rst b/doc/source/release.rst index 32db2ff5ebb24..04c499ff6797b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -2429,7 +2429,7 @@ New Features - ``plot(kind='kde')`` now accepts the optional parameters ``bw_method`` and ``ind``, passed to scipy.stats.gaussian_kde() (for scipy >= 0.11.0) to set - the bandwidth, and to gkde.evaluate() to specify the indicies at which it + the bandwidth, and to gkde.evaluate() to specify the indices at which it is evaluated, respectively. See scipy docs. 
(:issue:`4298`) - Added ``isin`` method to DataFrame (:issue:`4211`) - ``df.to_clipboard()`` learned a new ``excel`` keyword that let's you @@ -2540,7 +2540,7 @@ Improvements to existing features - ``read_json`` now raises a (more informative) ``ValueError`` when the dict contains a bad key and ``orient='split'`` (:issue:`4730`, :issue:`4838`) - ``read_stata`` now accepts Stata 13 format (:issue:`4291`) -- ``ExcelWriter`` and ``ExcelFile`` can be used as contextmanagers. +- ``ExcelWriter`` and ``ExcelFile`` can be used as context managers. (:issue:`3441`, :issue:`4933`) - ``pandas`` is now tested with two different versions of ``statsmodels`` (0.4.3 and 0.5.0) (:issue:`4981`). @@ -2553,7 +2553,7 @@ Improvements to existing features that cannot be concatenated (:issue:`4608`). - Add ``halflife`` option to exponentially weighted moving functions (PR :issue:`4998`) -- ``to_dict`` now takes ``records`` as a possible outtype. Returns an array +- ``to_dict`` now takes ``records`` as a possible out type. Returns an array of column-keyed dictionaries. 
(:issue:`4936`) - ``tz_localize`` can infer a fall daylight savings transition based on the structure of unlocalized data (:issue:`4230`) @@ -2664,13 +2664,13 @@ API Changes - ``select_as_coordinates`` will now return an ``Int64Index`` of the resultant selection set - support ``timedelta64[ns]`` as a serialization type (:issue:`3577`) - - store `datetime.date` objects as ordinals rather then timetuples to avoid + - store `datetime.date` objects as ordinals rather then time-tuples to avoid timezone issues (:issue:`2852`), thanks @tavistmorph and @numpand - ``numexpr`` 2.2.2 fixes incompatibility in PyTables 2.4 (:issue:`4908`) - ``flush`` now accepts an ``fsync`` parameter, which defaults to ``False`` (:issue:`5364`) - ``unicode`` indices not supported on ``table`` formats (:issue:`5386`) - - pass thru store creation arguments; can be used to support in-memory stores + - pass through store creation arguments; can be used to support in-memory stores - ``JSON`` - added ``date_unit`` parameter to specify resolution of timestamps. @@ -2736,7 +2736,7 @@ API Changes created when passing floating values in index creation. This enables a pure label-based slicing paradigm that makes ``[],ix,loc`` for scalar indexing and slicing work exactly the same. Indexing on other index types - are preserved (and positional fallback for ``[],ix``), with the exception, + are preserved (and positional fall back for ``[],ix``), with the exception, that floating point slicing on indexes on non ``Float64Index`` will raise a ``TypeError``, e.g. ``Series(range(5))[3.5:4.5]`` (:issue:`263`,:issue:`5375`) - Make Categorical repr nicer (:issue:`4368`) @@ -2765,7 +2765,7 @@ API Changes (:issue:`5339`) - default for `display.max_seq_len` is now 100 rather then `None`. This activates truncated display ("...") of long sequences in various places. 
(:issue:`3391`) -- **All** division with ``NDFrame`` - likes is now truedivision, regardless +- **All** division with ``NDFrame`` - likes is now true division, regardless of the future import. You can use ``//`` and ``floordiv`` to do integer division. @@ -2787,7 +2787,7 @@ API Changes dtype: float64 - raise/warn ``SettingWithCopyError/Warning`` exception/warning when setting of a - copy thru chained assignment is detected, settable via option ``mode.chained_assignment`` + copy through chained assignment is detected, settable via option ``mode.chained_assignment`` - test the list of ``NA`` values in the csv parser. add ``N/A``, ``#NA`` as independent default na values (:issue:`5521`) - The refactoring involving``Series`` deriving from ``NDFrame`` breaks ``rpy2<=2.3.8``. an Issue @@ -2888,7 +2888,7 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` (datetime/timedelta/time etc.) into a separate, cleaned up wrapper class. (:issue:`4613`) - Complex compat for ``Series`` with ``ndarray``. (:issue:`4819`) -- Removed unnecessary ``rwproperty`` from codebase in favor of builtin +- Removed unnecessary ``rwproperty`` from code base in favor of builtin property. (:issue:`4843`) - Refactor object level numeric methods (mean/sum/min/max...) from object level modules to ``core/generic.py`` (:issue:`4435`). @@ -3014,7 +3014,7 @@ Bug Fixes - Fix boolean indexing on an empty series loses index names (:issue:`4235`), infer_dtype works with empty arrays. - Fix reindexing with multiple axes; if an axes match was not replacing the - current axes, leading to a possible lazay frequency inference issue + current axes, leading to a possible lazy frequency inference issue (:issue:`3317`) - Fixed issue where ``DataFrame.apply`` was reraising exceptions incorrectly (causing the original stack trace to be truncated). @@ -3036,7 +3036,7 @@ Bug Fixes (:issue:`4727`) - Fix some inconsistencies with ``Index.rename`` and ``MultiIndex.rename``, etc. 
(:issue:`4718`, :issue:`4628`) -- Bug in using ``iloc/loc`` with a cross-sectional and duplicate indicies +- Bug in using ``iloc/loc`` with a cross-sectional and duplicate indices (:issue:`4726`) - Bug with using ``QUOTE_NONE`` with ``to_csv`` causing ``Exception``. (:issue:`4328`) @@ -3171,7 +3171,7 @@ Bug Fixes - Fixed bug in Excel writers where frames with duplicate column names weren't written correctly. (:issue:`5235`) - Fixed issue with ``drop`` and a non-unique index on Series (:issue:`5248`) -- Fixed seg fault in C parser caused by passing more names than columns in +- Fixed segfault in C parser caused by passing more names than columns in the file. (:issue:`5156`) - Fix ``Series.isin`` with date/time-like dtypes (:issue:`5021`) - C and Python Parser can now handle the more common multi-index column @@ -3377,7 +3377,7 @@ API Changes - more consistency in the to_datetime return types (give string/array of string inputs) (:issue:`3888`) - The internal ``pandas`` class hierarchy has changed (slightly). The previous ``PandasObject`` now is called ``PandasContainer`` and a new - ``PandasObject`` has become the baseclass for ``PandasContainer`` as well + ``PandasObject`` has become the base class for ``PandasContainer`` as well as ``Index``, ``Categorical``, ``GroupBy``, ``SparseList``, and ``SparseArray`` (+ their base classes). Currently, ``PandasObject`` provides string methods (from ``StringMixin``). 
(:issue:`4090`, :issue:`4092`) @@ -3729,7 +3729,7 @@ Bug Fixes - Bug in value_counts of ``datetime64[ns]`` Series (:issue:`3002`) - Fixed printing of ``NaT`` in an index - Bug in idxmin/idxmax of ``datetime64[ns]`` Series with ``NaT`` (:issue:`2982`) -- Bug in ``icol, take`` with negative indicies was producing incorrect return +- Bug in ``icol, take`` with negative indices was producing incorrect return values (see :issue:`2922`, :issue:`2892`), also check for out-of-bounds indices (:issue:`3029`) - Bug in DataFrame column insertion when the column creation fails, existing frame is left in an irrecoverable state (:issue:`3010`) @@ -3752,7 +3752,7 @@ Bug Fixes - Fix upsampling bug with closed='left' and daily to daily data (:issue:`3020`) - Fixed missing tick bars on scatter_matrix plot (:issue:`3063`) - Fixed bug in Timestamp(d,tz=foo) when d is date() rather then datetime() (:issue:`2993`) -- series.plot(kind='bar') now respects pylab color schem (:issue:`3115`) +- series.plot(kind='bar') now respects pylab color scheme (:issue:`3115`) - Fixed bug in reshape if not passed correct input, now raises TypeError (:issue:`2719`) - Fixed a bug where Series ctor did not respect ordering if OrderedDict passed in (:issue:`3282`) - Fix NameError issue on RESO_US (:issue:`2787`) @@ -3790,7 +3790,7 @@ Bug Fixes a simple index (:issue:`2893`) - Fix Python ASCII file parsing when integer falls outside of floating point spacing (:issue:`3258`) -- fixed pretty priniting of sets (:issue:`3294`) +- fixed pretty printing of sets (:issue:`3294`) - Panel() and Panel.from_dict() now respects ordering when give OrderedDict (:issue:`3303`) - DataFrame where with a datetimelike incorrectly selecting (:issue:`3311`) - Ensure index casts work even in Int64Index @@ -3837,7 +3837,7 @@ Improvements to existing features keyword to append - support automagic indexing via ``index`` keyword to append - support ``expectedrows`` keyword in append to inform ``PyTables`` about - the expected tablesize 
+ the expected table size - support ``start`` and ``stop`` keywords in select to limit the row selection space - added ``get_store`` context manager to automatically import with pandas @@ -3908,7 +3908,7 @@ Bug Fixes - Fix setitem on a Series with a boolean key and a non-scalar as value (:issue:`2686`) - Box datetime64 values in Series.apply/map (:issue:`2627`, :issue:`2689`) -- Upconvert datetime + datetime64 values when concatenating frames (:issue:`2624`) +- Up convert datetime + datetime64 values when concatenating frames (:issue:`2624`) - Raise a more helpful error message in merge operations when one DataFrame has duplicate columns (:issue:`2649`) - Fix partial date parsing issue occurring only when code is run at EOM @@ -4115,7 +4115,7 @@ Bug Fixes datetime64 when calling DataFrame.apply. (:issue:`2374`) - Raise exception when calling to_panel on non uniquely-indexed frame (:issue:`2441`) - Improved detection of console encoding on IPython zmq frontends (:issue:`2458`) -- Preserve time zone when .append-ing two time series (:issue:`2260`) +- Preserve time zone when .appending two time series (:issue:`2260`) - Box timestamps when calling reset_index on time-zone-aware index rather than creating a tz-less datetime64 column (:issue:`2262`) - Enable searching non-string columns in DataFrame.filter(like=...) 
(:issue:`2467`) @@ -4359,7 +4359,7 @@ Bug Fixes - Fix DatetimeIndex.isin to function properly (:issue:`1763`) - Fix conversion of array of tz-aware datetime.datetime to DatetimeIndex with right time zone (:issue:`1777`) -- Fix DST issues with generating ancxhored date ranges (:issue:`1778`) +- Fix DST issues with generating anchored date ranges (:issue:`1778`) - Fix issue calling sort on result of Series.unique (:issue:`1807`) - Fix numerical issue leading to square root of negative number in rolling_std (:issue:`1840`) @@ -4612,14 +4612,14 @@ New Features - Add keys() method on DataFrame (:issue:`1240`) - Add new ``match`` function to API (similar to R) (:issue:`502`) - Add dayfirst option to parsers (:issue:`854`) -- Add ``method`` argument to ``align`` method for forward/backward fillin +- Add ``method`` argument to ``align`` method for forward/backward filling (:issue:`216`) - Add Panel.transpose method for rearranging axes (:issue:`695`) - Add new ``cut`` function (patterned after R) for discretizing data into equal range-length bins or arbitrary breaks of your choosing (:issue:`415`) - Add new ``qcut`` for cutting with quantiles (:issue:`1378`) - Add ``value_counts`` top level array method (:issue:`1392`) -- Added Andrews curves plot tupe (:issue:`1325`) +- Added Andrews curves plot type (:issue:`1325`) - Add lag plot (:issue:`1440`) - Add autocorrelation_plot (:issue:`1425`) - Add support for tox and Travis CI (:issue:`1382`) @@ -4690,7 +4690,7 @@ API Changes - Remove deprecated DataMatrix name - Default merge suffixes for overlap now have underscores instead of periods to facilitate tab completion, etc. 
(:issue:`1239`) -- Deprecation of offset, time_rule timeRule parameters throughout codebase +- Deprecation of offset, time_rule timeRule parameters throughout code base - Series.append and DataFrame.append no longer check for duplicate indexes by default, add verify_integrity parameter (:issue:`1394`) - Refactor Factor class, old constructor moved to Factor.from_array @@ -4879,7 +4879,7 @@ Bug Fixes - Fix combineAdd NotImplementedError for SparseDataFrame (:issue:`887`) - Fix DataFrame.to_html encoding and columns (:issue:`890`, :issue:`891`, :issue:`909`) - Fix na-filling handling in mixed-type DataFrame (:issue:`910`) -- Fix to DataFrame.set_value with non-existant row/col (:issue:`911`) +- Fix to DataFrame.set_value with non-existent row/col (:issue:`911`) - Fix malformed block in groupby when excluding nuisance columns (:issue:`916`) - Fix inconsistent NA handling in dtype=object arrays (:issue:`925`) - Fix missing center-of-mass computation in ewmcov (:issue:`862`) @@ -4935,7 +4935,7 @@ Bug Fixes - Fix indexing operation for floating point values (:issue:`780`, :issue:`798`) - Fix groupby case resulting in malformed dataframe (:issue:`814`) - Fix behavior of reindex of Series dropping name (:issue:`812`) -- Improve on redudant groupby computation (:issue:`775`) +- Improve on redundant groupby computation (:issue:`775`) - Catch possible NA assignment to int/bool series with exception (:issue:`839`) pandas 0.7.0 @@ -5116,7 +5116,7 @@ Bug Fixes - Raise exception in out-of-bounds indexing of Series instead of seg-faulting, regression from earlier releases (:issue:`495`) - Fix error when joining DataFrames of different dtypes within the same - typeclass (e.g. float32 and float64) (:issue:`486`) + type class (e.g. 
float32 and float64) (:issue:`486`) - Fix bug in Series.min/Series.max on objects like datetime.datetime (GH :issue:`487`) - Preserve index names in Index.union (:issue:`501`) @@ -5162,7 +5162,7 @@ Bug Fixes - Format floats to default to same number of digits (:issue:`395`) - Added decorator to copy docstring from one function to another (:issue:`449`) - Fix error in monotonic many-to-one left joins -Fix __eq__ comparison between DateOffsets with different relativedelta +Fix __eq__ comparison between DateOffsets with different ``relativedelta`` keywords passed - Fix exception caused by parser converter returning strings (:issue:`583`) - Fix MultiIndex formatting bug with integer names (:issue:`601`) @@ -5461,7 +5461,7 @@ Improvements to existing features `Series.map` significantly when passed elementwise Python function, motivated by :issue:`355` - Cythonized `cache_readonly`, resulting in substantial micro-performance - enhancements throughout the codebase (:issue:`361`) + enhancements throughout the code base (:issue:`361`) - Special Cython matrix iterator for applying arbitrary reduction operations with 3-5x better performance than `np.apply_along_axis` (:issue:`309`) - Add `raw` option to `DataFrame.apply` for getting better performance when @@ -5751,7 +5751,7 @@ pandas 0.4.3 **Release date:** 10/9/2011 -This is largely a bugfix release from 0.4.2 but also includes a handful of new +This is largely a bug fix release from 0.4.2 but also includes a handful of new and enhanced features. Also, pandas can now be installed and used on Python 3 (thanks Thomas Kluyver!). @@ -5803,7 +5803,7 @@ Bug Fixes - Fix Python ndarray access in Cython code for sparse blocked index integrity check - Fix bug writing Series to CSV in Python 3 (:issue:`209`) -- Miscellaneous Python 3 bugfixes +- Miscellaneous Python 3 bug fixes Thanks ~~~~~~ @@ -5828,7 +5828,7 @@ New Features int64-based time series (e.g. 
using NumPy's datetime64 one day) and also faster operations on DataFrame objects storing record array-like data. - Refactored `Index` classes to have a `join` method and associated data - alignment routines throughout the codebase to be able to leverage optimized + alignment routines throughout the code base to be able to leverage optimized joining / merging routines. - Added `Series.align` method for aligning two series with choice of join method @@ -6164,7 +6164,7 @@ API Changes - Removed `pandas.core.pytools` module. Code has been moved to `pandas.core.common` - Tacked on `groupName` attribute for groups in GroupBy renamed to `name` -- Panel/LongPanel `dims` attribute renamed to `shape` to be more conformant +- Panel/LongPanel `dims` attribute renamed to `shape` to be more conforming - Slicing a `Series` returns a view now - More Series deprecations / renaming: `toCSV` to `to_csv`, `asOf` to `asof`, `merge` to `map`, `applymap` to `apply`, `toDict` to `to_dict`, diff --git a/doc/source/spelling_wordlist.txt b/doc/source/spelling_wordlist.txt new file mode 100644 index 0000000000000..4c355a1b9c435 --- /dev/null +++ b/doc/source/spelling_wordlist.txt @@ -0,0 +1,916 @@ +IPython +ipython +numpy +NumPy +Reindexing +reindexing +ga +fe +reindexed +automagic +Histogramming +histogramming +concat +resampling +iterables +sparsified +df +loc +gc +Timeseries +ndarrays +ndarray +dtype +dtypes +dtyped +reindex +sliceable +timedelta +Timedeltas +timedeltas +subpackages +subpackage +filepath +io +nthreads +kwargs +kwarg +arg +args +Datetimelike +datetime +datetimes +tz +builtin +NaN +nan +behaviour +quantiling +aggregators +aggregator +Dtypes +groupby +GroupBy +Tablewise +Elementwise +ufunc +ufuncs +dict +namedtuples +namedtuple +iterrows +upcasted +upcasting +upcast +searchsorted +downcasting +Likert +categoricals +Groupby +Unioning +csv +Upcase +resampling +Upcase +Lowcase +Propcase +Interop +Stata +stata +bysort +Spearman +Wikipedia +debiasing +docstrings +docstring 
+Docstrings +autosummary +linting +toolchain +Appveyor +Akogun +online +pdf +reStructuredText +reST +backticks +cpus +str +idxmin +mins +agg +DataFrame +dataframes +NaT +len +Statsmodels +Bokeh +Protovis +Seaborn +Wickham +shareability +apps +app +Plotly +Spyder +Fama +Eurostat +organisations +Geopandas +Dask +Scikit +backends +Engarde +Cyberpandas +Accessor +Numba +optimising +Cython +cython +cythonizing +cythonized +Vectorize +ol +subclassing +IPv +iteritems +itertuples +dt +upcast +subsetting +programmatically +stderr +scipy +SparseArray +doctests +nd +refactored +Jit +stdout +Typeclass +Pythonic +zscore +SQL +broadcastable +resample +resamples +groupbys +metaprogramming +upcast +un +dropna +ints +int +boxplot +groupwise +indices +pre +datetimelike +dev +gd +colname +intemname +nd +isin +backporting +admin +Debian +Ubuntu +Centos +RHEL +xlsx +xz +ftp +impl +timespans +pre +Regex +regex +sortedness +delim +usecols +skipinitialspace +skiprows +skipfooter +nrows +na +iso +dayfirst +chunksize +gz +bz +lineterminator +quotechar +doublequote +escapechar +tupleize +prepended +colspecs +NONNUMERIC +serializer +localhost +json +strtod +deserialization +Hadoop +ns +stringified +xclip +xsel +gtk +gtpy +Msgpacks +msgpack +msgpacks +foo +ptrepack +sqlalchemy +sqlite +Sqlite +dta +bdat +netCDF +backend +deserialising +deserializing +qtpy +indexables +itemsize +de +sas +Miniconda +itemname +ndims +ndim +mergands +Timeseries +timeseries +asof +Nans +DataFrames +fillna +ffill +bfill +alignable +sim +py +ipy +colheader +yearfirst +repr +EngFormatter +frontends +frontend +longtable +multirow +cline +clines +colwidth +Sparsify +html +pprint +mathjax +Jupyter +xls +xlsm +hdf +numexpr +matplotlib +timedeltas +lexual +danbirken +isnull +Timestamp +np +xs +locs +datelike +dups +recarray +setitem +rhs +gaussian +kde +gkde +fwf +iNf +astyping +vbench +lgautier +jnothman +roundtrip +xlrd +buf +jtratner +tavistmorph +numpand +unserialiable +tseries +mul +completers +refactor +Refactor 
+subclassed +consolidatable +setitem +DataFrame +klass +jtratner +bs +lxml +rockg +inplace +pyt +tslib +vals +pos +cparser +locs +repr'd +cumsum +cumprod +rhs +datetimeindex +reraising +iloc +setitem +lhs +ticklocs +ticklabels +immerrr +np +kwds +travis +ci +yarikoptic +setitem +delitem +cpcloud +pprinting +hoechenberger +Faq +FAQ +faq +mtkini +spearman +SleepingPills +astypes +cov +timedeltalike +weekmasks +Weekmasks +xlrd +unioning +uint +iget +applymap +stonebig +recarrays +tdsmith +tokenization +google +xN +sharex +famafrench +strptime +stephenwlin +nans +diff +ohlc +util +seg +getitem +queryables +Dataframe +idxmax +putmasking +argsort +unsampling +pylab +fromordinal +andrews +strftime +wb +gzipped +gzip +aggfunc +multithreading +unicode +bork +tokenizer +sortlevel +Scikits +isnull +ndpanel +notnul +ctor +tzinfo +tzoffset +endianness +Upsampling +upsampling +upsampled +locators +locator +astimezone +iget +qcut +ewma +icol +printoption +quantileTS +UTC +utc +bool +init +OLS +Isnull +nansum +Cythonize +extlinks +utcoffset +khash +kendall +tolist +unhandled +downsampling +dayofyear +setops +discretizing +klib +ylabel +bday +BDay +timeRule +unmergeable +navar +pyplot +multiindex +combineAdd +ewmcov +algos +unpickling +MultiIndex +Memoize +Unbox +nanops +vectorize +DataFame +fallback +sharey +xlabel +notnull +asfreq +crit +rpy +nanvar +ddof +ols +printoptions +rankdata +pyo +camelCased +cacheable +unindexed +reduceat +blosc +aggregatable +idx +tradeoff +nPeriods +camelCasing +camelCased +LongPanel +truediv +px +parseCSV +unpivoted +extractall +weekofyear +dayofweek +CDay +Nano +parameterised +sunday +monday +tuesday +friday +upsample +resampled +tzfile +bools +xlsxwriter +ggplot +Colormaps +colormaps +trippable +callables +pivotting +GBQ +intersphinx +hashable +compat +Compat +rollforward +seekable +endian +subrecords +readonly +orderedness +eval +datetimelikes +pytables +argmax +argmin +utf +segfault +segfaults +xlims +CPython +MultiIndexed +blosc +blosclz +hc +lz 
+zlib +zstd +tput +boxplot +UInt +unioned +hashtable +saslib +resampled +dicts +datetimetz +ascii +evals +Compat +lexsorted +errstate +incompat +boxplots +honour +UTF +subclasse +ungrouped +xport +writeable +unencodable +serialising +serialise +Segfault +ceiled +xarray +jupyter +ified +isoformat +downsample +upsample +aggregator +ascii +compat +src +ness +unencoded +submethods +gbq +vectorised +nanos +Bigquery +complib +overridable +xlabels +xticklabels +listlike +jobComplete +cummin +cummax +undeprecated +triang +errored +unpickle +ngroups +multiindexes +xticks +yticks +errorbars +barplots +rcParams +dfs +nw +Openpyxl +barh +timestamp +inv +Welford +tarball +hdfstore +Pandonic +Perf +factorizer +sharey +yyyy +dd +xxx +bdays +nfrequencies +XYZ +Vytautas +Jancauskas +rankdata +Astype +astyped +mergesort +nano +unpickled +dataframe +serialised +serialisation +numpies +deserialize +hashtables +unpivoting +cubehelix +unparsable +fu +Unpivots +rownames +retbins +objs +sep +stubnames +expr +func +skipna +halflife +cond +ceil +fillchar +swapcased +deletechars +figsize +bw +xlabelsize +ftypes +ge +Unpivots +lsuffix +fname +fo +ftypes +rsuffix +sparsifying +tup +cls +nonunique +xrange +periodIndex +pytz +ctime +dst +localtime +proleptic +tzname +stddev +resampler +Resampler +searchpath +cmap +visualising +figsize +desc +Iterable +da +ta +CategoricalIndex +specialised +takeable +iter +upcase +Outlier +fontsize +pearson +corrwith +eq +ewm +floordiv +ftype +iat +typeR +slinear +krogh +akima +BPoly +isna +kurt +le +lt +ne +notna +nsmallest +Deutsche +Colormap +colorbar +silverman +gridsize +radd +rdiv +regexes +rfloordiv +rmod +rmul +rpow +rsub +rtruediv +RandomState +sem +quicksort +heapsort +organised +swapaxes +swaplevel +OAuth +defaultdict +tablename +HDFStore +appendable +searchable +serialisable +lzo +usepackage +booktabs +coereced +spellcheck +misspelt +rcl +multicolumns +gfc +automagically +fastparquet +brotli +sql +nullable +performant +lexsorted +tw +latin +StrL 
+tshift +basestring +DatetimeIndex +periodIndex +pydatetime +perioddelta +ExcelFile +noqa +deepcopy +Discretize +hasnans +nbytes +nlevels +DateOffset +stringr +orderable +IntervalIndex +versionadded +lexsort +droplevel +swaplevel +kurt +IGNORECASE +findall +isalnum +isalpha +isdecimal +isdigit +islower +isnumeric +isspace +istitle +isupper +ljust +lstrip +rfind +rindex +rpartition +rsplit +rstrip +startswith +deletechars +whitespaces +insecable +stringr +zfill +tshift +SparseSeries +isoweekday +isocalendar +fromtimestamp +dateutil +utcfromtimestamp +utcnow +utctimetuple +api +ExtensionArray +nbytes +abc +ABCMeta +Typecode +ExtensionDtype +biufcmMOSUV +accessor +CategoricalDtype +DataFrameGroupBy +Weekmask +walkthrough +wieldy +stubnames +unix +asian +Eg +recomputation +useQueryCache +LocalPath +fspath +params +datatypes +connectable +multirows +sparsify +parseable +TimedeltaIndex +baz +pathlib +radviz +axvline +xtick +unpivot +StataWriter +StataReader +IndexSlice +uuid +cellstyle +tablewise +rowwise +columnwise +env +fba +Regexp +sparsify +multiline +UnsupportedFunctionCall +UnsortedIndexError +PerformanceWarning +ParserWarning +ParserError +OutOfBoundsDatetime +EmptyDataError +DtypeWarning +crosstab +SeriesGroupBy +nunique +nlargest +Truthy +cumcount +ngroup +bdate +toordinal +julian +timetz +timetuple +freqstr +daysinmonth +asm +TimedeltaIndex +pytimedelta +autodetect +coords +endswith +SparseDataFrame +spmatrix +swapcase +rjust +ndarrary +regexs +ptp +imag +gca +keywors +intercalary +daysinmonth +divmod +autocorr +asobject +Argsorts +xrot +RangeIndex +PeriodIndex +qyear +timeries +scikits +fromDict +levshape +putmask +asi +repl \ No newline at end of file diff --git a/doc/source/text.rst b/doc/source/text.rst index 4af64d9f791cc..34bb1a07dfc08 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -55,8 +55,8 @@ Since ``df.columns`` is an Index object, we can use the ``.str`` accessor df.columns.str.lower() These string methods can then be used to clean 
up the columns as needed. -Here we are removing leading and trailing whitespaces, lowercasing all names, -and replacing any remaining whitespaces with underscores: +Here we are removing leading and trailing white spaces, lower casing all names, +and replacing any remaining white spaces with underscores: .. ipython:: python diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 73e3e721aad71..f1011f7c5c3c6 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1738,7 +1738,7 @@ If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ... ValueError: Input has different freq from Period(freq=H) -If ``Period`` has other freqs, only the same ``offsets`` can be added. Otherwise, ``ValueError`` will be raised. +If ``Period`` has other frequencies, only the same ``offsets`` can be added. Otherwise, ``ValueError`` will be raised. .. ipython:: python diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 09a52ee527cb5..17197b805e86a 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -1061,7 +1061,7 @@ Plot Formatting Setting the plot style ~~~~~~~~~~~~~~~~~~~~~~ -From version 1.5 and up, matplotlib offers a range of preconfigured plotting styles. Setting the +From version 1.5 and up, matplotlib offers a range of pre-configured plotting styles. Setting the style can be used to easily give plots the general look that you want. Setting the style is as easy as calling ``matplotlib.style.use(my_plot_style)`` before creating your plot. 
For example you could write ``matplotlib.style.use('ggplot')`` for ggplot-style diff --git a/doc/source/whatsnew/v0.10.0.txt b/doc/source/whatsnew/v0.10.0.txt index 3fc05158b7fe7..3a269e53a2404 100644 --- a/doc/source/whatsnew/v0.10.0.txt +++ b/doc/source/whatsnew/v0.10.0.txt @@ -370,7 +370,7 @@ Updated PyTables Support df1.get_dtype_counts() - performance improvements on table writing -- support for arbitrarily indexed dimensions +- support for arbitrarily indexed dimensions - ``SparseSeries`` now has a ``density`` property (:issue:`2384`) - enable ``Series.str.strip/lstrip/rstrip`` methods to take an input argument to strip arbitrary characters (:issue:`2411`) diff --git a/doc/source/whatsnew/v0.10.1.txt b/doc/source/whatsnew/v0.10.1.txt index 2d5843101dec2..bb405c283ba24 100644 --- a/doc/source/whatsnew/v0.10.1.txt +++ b/doc/source/whatsnew/v0.10.1.txt @@ -149,7 +149,7 @@ combined result, by using ``where`` on a selector table. `nan`. - You can pass ``index`` to ``append``. This defaults to ``True``. This will - automagically create indicies on the *indexables* and *data columns* of the + automagically create indices on the *indexables* and *data columns* of the table - You can pass ``chunksize=an integer`` to ``append``, to change the writing @@ -157,7 +157,7 @@ combined result, by using ``where`` on a selector table. on writing. - You can pass ``expectedrows=an integer`` to the first ``append``, to set the - TOTAL number of expectedrows that ``PyTables`` will expected. This will + TOTAL number of expected rows that ``PyTables`` will expect. This will optimize read/write performance. - ``Select`` now supports passing ``start`` and ``stop`` to provide selection @@ -191,7 +191,7 @@ combined result, by using ``where`` on a selector table. 
levels with a very large number of combinatorial values (:issue:`2684`) - Fixed bug that causes plotting to fail when the index is a DatetimeIndex with a fixed-offset timezone (:issue:`2683`) -- Corrected businessday subtraction logic when the offset is more than 5 bdays +- Corrected business day subtraction logic when the offset is more than 5 bdays and the starting date is on a weekend (:issue:`2680`) - Fixed C file parser behavior when the file has more columns than data (:issue:`2668`) diff --git a/doc/source/whatsnew/v0.11.0.txt b/doc/source/whatsnew/v0.11.0.txt index b90a597815ec5..3c9cfda49aebd 100644 --- a/doc/source/whatsnew/v0.11.0.txt +++ b/doc/source/whatsnew/v0.11.0.txt @@ -33,7 +33,7 @@ three types of multi-axis indexing. See more at :ref:`Selection by Label <indexing.label>` -- ``.iloc`` is strictly integer position based (from ``0`` to ``length-1`` of the axis), will raise ``IndexError`` when the requested indicies are out of bounds. Allowed inputs are: +- ``.iloc`` is strictly integer position based (from ``0`` to ``length-1`` of the axis), will raise ``IndexError`` when the requested indices are out of bounds. Allowed inputs are: - An integer e.g. ``5`` - A list or array of integers ``[4, 3, 0]`` @@ -44,7 +44,7 @@ three types of multi-axis indexing. - ``.ix`` supports mixed integer and label based access. It is primarily label based, but will fallback to integer positional access. ``.ix`` is the most general and will support any of the inputs to ``.loc`` and ``.iloc``, as well as support for floating point label schemes. ``.ix`` is especially useful when dealing with mixed positional and label - based hierarchial indexes. + based hierarchical indexes. 
As using integer slices with ``.ix`` have different behavior depending on whether the slice is interpreted as position based or label based, it's usually better to be @@ -211,7 +211,7 @@ Astype conversion on ``datetime64[ns]`` to ``object``, implicitly converts ``NaT API changes ~~~~~~~~~~~ - - Added to_series() method to indicies, to facilitate the creation of indexers + - Added to_series() method to indices, to facilitate the creation of indexers (:issue:`3275`) - ``HDFStore`` diff --git a/doc/source/whatsnew/v0.12.0.txt b/doc/source/whatsnew/v0.12.0.txt index ad33c49792d9f..69483b18a5490 100644 --- a/doc/source/whatsnew/v0.12.0.txt +++ b/doc/source/whatsnew/v0.12.0.txt @@ -73,7 +73,7 @@ API changes e.g. a boolean Series, even with integer labels, will raise. Since ``iloc`` is purely positional based, the labels on the Series are not alignable (:issue:`3631`) - This case is rarely used, and there are plently of alternatives. This preserves the + This case is rarely used, and there are plenty of alternatives. This preserves the ``iloc`` API to be *purely* positional based. .. ipython:: python @@ -166,7 +166,7 @@ API changes - The internal ``pandas`` class hierarchy has changed (slightly). The previous ``PandasObject`` now is called ``PandasContainer`` and a new - ``PandasObject`` has become the baseclass for ``PandasContainer`` as well + ``PandasObject`` has become the base class for ``PandasContainer`` as well as ``Index``, ``Categorical``, ``GroupBy``, ``SparseList``, and ``SparseArray`` (+ their base classes). Currently, ``PandasObject`` provides string methods (from ``StringMixin``). (:issue:`4090`, :issue:`4092`) @@ -296,7 +296,7 @@ Other Enhancements df.replace(regex=r'\s*\.\s*', value=np.nan) to replace all occurrences of the string ``'.'`` with zero or more - instances of surrounding whitespace with ``NaN``. + instances of surrounding white space with ``NaN``. Regular string replacement still works as expected. 
For example, you can do @@ -403,7 +403,7 @@ Bug Fixes :issue:`3572`, :issue:`3911`, :issue:`3912`), but they will try to convert object arrays to numeric arrays if possible so that you can still plot, for example, an object array with floats. This happens before any drawing takes place which - elimnates any spurious plots from showing up. + eliminates any spurious plots from showing up. - ``fillna`` methods now raise a ``TypeError`` if the ``value`` parameter is a list or tuple. diff --git a/doc/source/whatsnew/v0.13.0.txt b/doc/source/whatsnew/v0.13.0.txt index 02ddc362255ec..94cd451196ead 100644 --- a/doc/source/whatsnew/v0.13.0.txt +++ b/doc/source/whatsnew/v0.13.0.txt @@ -414,7 +414,7 @@ HDFStore API Changes - add the keyword ``dropna=True`` to ``append`` to change whether ALL nan rows are not written to the store (default is ``True``, ALL nan rows are NOT written), also settable via the option ``io.hdf.dropna_table`` (:issue:`4625`) -- pass thru store creation arguments; can be used to support in-memory stores +- pass through store creation arguments; can be used to support in-memory stores DataFrame repr Changes ~~~~~~~~~~~~~~~~~~~~~~ @@ -443,7 +443,7 @@ Enhancements - Clipboard functionality now works with PySide (:issue:`4282`) - Added a more informative error message when plot arguments contain overlapping color and style arguments (:issue:`4402`) -- ``to_dict`` now takes ``records`` as a possible outtype. Returns an array +- ``to_dict`` now takes ``records`` as a possible out type. Returns an array of column-keyed dictionaries. 
(:issue:`4936`) - ``NaN`` handing in get_dummies (:issue:`4446`) with `dummy_na` diff --git a/doc/source/whatsnew/v0.14.0.txt b/doc/source/whatsnew/v0.14.0.txt index 92c699017fc13..4408470c52feb 100644 --- a/doc/source/whatsnew/v0.14.0.txt +++ b/doc/source/whatsnew/v0.14.0.txt @@ -78,10 +78,10 @@ API changes - ``df.iloc[len(df)::-1]`` now enumerates all elements in reverse - The :meth:`DataFrame.interpolate` keyword ``downcast`` default has been changed from ``infer`` to - ``None``. This is to preseve the original dtype unless explicitly requested otherwise (:issue:`6290`). + ``None``. This is to preserve the original dtype unless explicitly requested otherwise (:issue:`6290`). - When converting a dataframe to HTML it used to return `Empty DataFrame`. This special case has been removed, instead a header with the column names is returned (:issue:`6062`). -- ``Series`` and ``Index`` now internall share more common operations, e.g. ``factorize(),nunique(),value_counts()`` are +- ``Series`` and ``Index`` now internally share more common operations, e.g. ``factorize(),nunique(),value_counts()`` are now supported on ``Index`` types as well. The ``Series.weekday`` property from is removed from Series for API consistency. Using a ``DatetimeIndex/PeriodIndex`` method on a Series will now raise a ``TypeError``. (:issue:`4551`, :issue:`4056`, :issue:`5519`, :issue:`6380`, :issue:`7206`). @@ -294,7 +294,7 @@ Display Changes Text Parsing API Changes ~~~~~~~~~~~~~~~~~~~~~~~~ -:func:`read_csv`/:func:`read_table` will now be noiser w.r.t invalid options rather than falling back to the ``PythonParser``. +:func:`read_csv`/:func:`read_table` will now be noisier w.r.t invalid options rather than falling back to the ``PythonParser``. - Raise ``ValueError`` when ``sep`` specified with ``delim_whitespace=True`` in :func:`read_csv`/:func:`read_table` @@ -714,7 +714,7 @@ Deprecations Use the `percentiles` keyword instead, which takes a list of percentiles to display. 
The default output is unchanged. -- The default return type of :func:`boxplot` will change from a dict to a matpltolib Axes +- The default return type of :func:`boxplot` will change from a dict to a matplotlib Axes in a future release. You can use the future behavior now by passing ``return_type='axes'`` to boxplot. @@ -781,7 +781,7 @@ Enhancements noon, January 1, 4713 BC. Because nanoseconds are used to define the time in pandas the actual range of dates that you can use is 1678 AD to 2262 AD. (:issue:`4041`) - ``DataFrame.to_stata`` will now check data for compatibility with Stata data types - and will upcast when needed. When it is not possible to losslessly upcast, a warning + and will upcast when needed. When it is not possible to losslessly upcast, a warning is issued (:issue:`6327`) - ``DataFrame.to_stata`` and ``StataWriter`` will accept keyword arguments time_stamp and data_label which allow the time stamp and dataset label to be set when creating a @@ -881,7 +881,7 @@ Bug Fixes - Prevent segfault due to MultiIndex not being supported in HDFStore table format (:issue:`1848`) - Bug in ``pd.DataFrame.sort_index`` where mergesort wasn't stable when ``ascending=False`` (:issue:`6399`) -- Bug in ``pd.tseries.frequencies.to_offset`` when argument has leading zeroes (:issue:`6391`) +- Bug in ``pd.tseries.frequencies.to_offset`` when argument has leading zeros (:issue:`6391`) - Bug in version string gen. for dev versions with shallow clones / install from tarball (:issue:`6127`) - Inconsistent tz parsing ``Timestamp`` / ``to_datetime`` for current year (:issue:`5958`) - Indexing bugs with reordered indexes (:issue:`6252`, :issue:`6254`) @@ -922,7 +922,7 @@ Bug Fixes - Bug in ``Series.reindex`` when specifying a ``method`` with some nan values was inconsistent (noted on a resample) (:issue:`6418`) - Bug in :meth:`DataFrame.replace` where nested dicts were erroneously depending on the order of dictionary keys and values (:issue:`5338`). 
-- Perf issue in concatting with empty objects (:issue:`3259`) +- Performance issue in concatenating with empty objects (:issue:`3259`) - Clarify sorting of ``sym_diff`` on ``Index`` objects with ``NaN`` values (:issue:`6444`) - Regression in ``MultiIndex.from_product`` with a ``DatetimeIndex`` as input (:issue:`6439`) - Bug in ``str.extract`` when passed a non-default index (:issue:`6348`) @@ -966,8 +966,8 @@ Bug Fixes - Bug in downcasting inference with empty arrays (:issue:`6733`) - Bug in ``obj.blocks`` on sparse containers dropping all but the last items of same for dtype (:issue:`6748`) - Bug in unpickling ``NaT (NaTType)`` (:issue:`4606`) -- Bug in ``DataFrame.replace()`` where regex metacharacters were being treated - as regexs even when ``regex=False`` (:issue:`6777`). +- Bug in ``DataFrame.replace()`` where regex meta characters were being treated + as regex even when ``regex=False`` (:issue:`6777`). - Bug in timedelta ops on 32-bit platforms (:issue:`6808`) - Bug in setting a tz-aware index directly via ``.index`` (:issue:`6785`) - Bug in expressions.py where numexpr would try to evaluate arithmetic ops @@ -983,7 +983,7 @@ Bug Fixes would only replace the first occurrence of a value (:issue:`6689`) - Better error message when passing a frequency of 'MS' in ``Period`` construction (GH5332) - Bug in ``Series.__unicode__`` when ``max_rows=None`` and the Series has more than 1000 rows. 
(:issue:`6863`) -- Bug in ``groupby.get_group`` where a datetlike wasn't always accepted (:issue:`5267`) +- Bug in ``groupby.get_group`` where a datelike wasn't always accepted (:issue:`5267`) - Bug in ``groupBy.get_group`` created by ``TimeGrouper`` raises ``AttributeError`` (:issue:`6914`) - Bug in ``DatetimeIndex.tz_localize`` and ``DatetimeIndex.tz_convert`` converting ``NaT`` incorrectly (:issue:`5546`) - Bug in arithmetic operations affecting ``NaT`` (:issue:`6873`) @@ -994,7 +994,7 @@ Bug Fixes - Bug in ``DataFrame.plot`` and ``Series.plot``, where the legend behave inconsistently when plotting to the same axes repeatedly (:issue:`6678`) - Internal tests for patching ``__finalize__`` / bug in merge not finalizing (:issue:`6923`, :issue:`6927`) - accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`) -- Bug in C parser with leading whitespace (:issue:`3374`) +- Bug in C parser with leading white space (:issue:`3374`) - Bug in C parser with ``delim_whitespace=True`` and ``\r``-delimited lines - Bug in python parser with explicit multi-index in row following column header (:issue:`6893`) - Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`) diff --git a/doc/source/whatsnew/v0.14.1.txt b/doc/source/whatsnew/v0.14.1.txt index 32a2391c75531..f7f69218e0ef5 100644 --- a/doc/source/whatsnew/v0.14.1.txt +++ b/doc/source/whatsnew/v0.14.1.txt @@ -172,7 +172,7 @@ Bug Fixes - Bug in Panel indexing with a multi-index axis (:issue:`7516`) - Regression in datetimelike slice indexing with a duplicated index and non-exact end-points (:issue:`7523`) - Bug in setitem with list-of-lists and single vs mixed types (:issue:`7551`:) -- Bug in timeops with non-aligned Series (:issue:`7500`) +- Bug in time ops with non-aligned Series (:issue:`7500`) - Bug in timedelta inference when assigning an incomplete Series (:issue:`7592`) - Bug in groupby ``.nth`` with a Series and 
integer-like column name (:issue:`7559`) - Bug in ``Series.get`` with a boolean accessor (:issue:`7407`) @@ -209,7 +209,7 @@ Bug Fixes - Bug in inferred_freq results in None for eastern hemisphere timezones (:issue:`7310`) - Bug in ``Easter`` returns incorrect date when offset is negative (:issue:`7195`) - Bug in broadcasting with ``.div``, integer dtypes and divide-by-zero (:issue:`7325`) -- Bug in ``CustomBusinessDay.apply`` raiases ``NameError`` when ``np.datetime64`` object is passed (:issue:`7196`) +- Bug in ``CustomBusinessDay.apply`` raises ``NameError`` when ``np.datetime64`` object is passed (:issue:`7196`) - Bug in ``MultiIndex.append``, ``concat`` and ``pivot_table`` don't preserve timezone (:issue:`6606`) - Bug in ``.loc`` with a list of indexers on a single-multi index level (that is not nested) (:issue:`7349`) - Bug in ``Series.map`` when mapping a dict with tuple keys of different lengths (:issue:`7333`) diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt index 0f1a8c324de54..94093b2cfb16c 100644 --- a/doc/source/whatsnew/v0.15.0.txt +++ b/doc/source/whatsnew/v0.15.0.txt @@ -44,7 +44,7 @@ users upgrade to this version. .. warning:: - The refactorings in :class:`~pandas.Categorical` changed the two argument constructor from + The refactoring in :class:`~pandas.Categorical` changed the two argument constructor from "codes/labels and levels" to "values and levels (now called 'categories')". This can lead to subtle bugs. If you use :class:`~pandas.Categorical` directly, please audit your code before updating to this pandas version and change it to use the :meth:`~pandas.Categorical.from_codes` constructor. See more on ``Categorical`` :ref:`here <whatsnew_0150.cat>` @@ -139,7 +139,7 @@ This type is very similar to how ``Timestamp`` works for ``datetimes``. It is a The arguments to ``pd.to_timedelta`` are now ``(arg,unit='ns',box=True,coerce=False)``, previously were ``(arg,box=True,unit='ns')`` as these are more logical. 
-Consruct a scalar +Construct a scalar .. ipython:: python @@ -794,7 +794,7 @@ Other notable API changes: .. _whatsnew_0150.blanklines: - Made both the C-based and Python engines for `read_csv` and `read_table` ignore empty lines in input as well as - whitespace-filled lines, as long as ``sep`` is not whitespace. This is an API change + white space-filled lines, as long as ``sep`` is not white space. This is an API change that can be controlled by the keyword parameter ``skip_blank_lines``. See :ref:`the docs <io.skiplines>` (:issue:`4466`) - A timeseries/index localized to UTC when inserted into a Series/DataFrame will preserve the UTC timezone @@ -940,7 +940,7 @@ Enhancements Enhancements in the importing/exporting of Stata files: -- Added support for bool, uint8, uint16 and uint32 datatypes in ``to_stata`` (:issue:`7097`, :issue:`7365`) +- Added support for bool, uint8, uint16 and uint32 data types in ``to_stata`` (:issue:`7097`, :issue:`7365`) - Added conversion option when importing Stata files (:issue:`8527`) - ``DataFrame.to_stata`` and ``StataWriter`` check string length for compatibility with limitations imposed in dta files where fixed-width @@ -988,7 +988,7 @@ Other: - Added ``split`` as an option to the ``orient`` argument in ``pd.DataFrame.to_dict``. (:issue:`7840`) - The ``get_dummies`` method can now be used on DataFrames. By default only - catagorical columns are encoded as 0's and 1's, while other columns are + categorical columns are encoded as 0's and 1's, while other columns are left untouched. .. 
ipython:: python @@ -1070,7 +1070,7 @@ Other: idx.duplicated() idx.drop_duplicates() -- add ``copy=True`` argument to ``pd.concat`` to enable pass thru of complete blocks (:issue:`8252`) +- add ``copy=True`` argument to ``pd.concat`` to enable pass through of complete blocks (:issue:`8252`) - Added support for numpy 1.8+ data types (``bool_``, ``int_``, ``float_``, ``string_``) for conversion to R dataframe (:issue:`8400`) diff --git a/doc/source/whatsnew/v0.15.1.txt b/doc/source/whatsnew/v0.15.1.txt index 345fc9f1b5da7..918eab3a9763e 100644 --- a/doc/source/whatsnew/v0.15.1.txt +++ b/doc/source/whatsnew/v0.15.1.txt @@ -72,7 +72,7 @@ API changes df.groupby(ts, as_index=False).max() -- ``groupby`` will not erroneously exclude columns if the column name conflics +- ``groupby`` will not erroneously exclude columns if the column name conflicts with the grouper name (:issue:`8112`): .. ipython:: python diff --git a/doc/source/whatsnew/v0.15.2.txt b/doc/source/whatsnew/v0.15.2.txt index f1dfab0f57ed3..16a57676c89c0 100644 --- a/doc/source/whatsnew/v0.15.2.txt +++ b/doc/source/whatsnew/v0.15.2.txt @@ -165,7 +165,7 @@ Other enhancements: - Added support for ``utcfromtimestamp()``, ``fromtimestamp()``, and ``combine()`` on `Timestamp` class (:issue:`5351`). - Added Google Analytics (`pandas.io.ga`) basic documentation (:issue:`8835`). See `here <http://pandas.pydata.org/pandas-docs/version/0.15.2/remote_data.html#remote-data-ga>`__. - ``Timedelta`` arithmetic returns ``NotImplemented`` in unknown cases, allowing extensions by custom classes (:issue:`8813`). -- ``Timedelta`` now supports arithemtic with ``numpy.ndarray`` objects of the appropriate dtype (numpy 1.8 or newer only) (:issue:`8884`). +- ``Timedelta`` now supports arithmetic with ``numpy.ndarray`` objects of the appropriate dtype (numpy 1.8 or newer only) (:issue:`8884`). - Added ``Timedelta.to_timedelta64()`` method to the public API (:issue:`8884`). 
- Added ``gbq.generate_bq_schema()`` function to the gbq module (:issue:`8325`). - ``Series`` now works with map objects the same way as generators (:issue:`8909`). @@ -173,7 +173,7 @@ Other enhancements: - ``to_datetime`` gains an ``exact`` keyword to allow for a format to not require an exact match for a provided format string (if its ``False``). ``exact`` defaults to ``True`` (meaning that exact matching is still the default) (:issue:`8904`) - Added ``axvlines`` boolean option to parallel_coordinates plot function, determines whether vertical lines will be printed, default is True - Added ability to read table footers to read_html (:issue:`8552`) -- ``to_sql`` now infers datatypes of non-NA values for columns that contain NA values and have dtype ``object`` (:issue:`8778`). +- ``to_sql`` now infers data types of non-NA values for columns that contain NA values and have dtype ``object`` (:issue:`8778`). .. _whatsnew_0152.performance: @@ -215,7 +215,7 @@ Bug Fixes - ``io.data.Options`` now raises ``RemoteDataError`` when no expiry dates are available from Yahoo and when it receives no data from Yahoo (:issue:`8761`), (:issue:`8783`). - Fix: The font size was only set on x axis if vertical or the y axis if horizontal. (:issue:`8765`) - Fixed division by 0 when reading big csv files in python 3 (:issue:`8621`) -- Bug in outputting a Multindex with ``to_html,index=False`` which would add an extra column (:issue:`8452`) +- Bug in outputting a MultiIndex with ``to_html,index=False`` which would add an extra column (:issue:`8452`) - Imported categorical variables from Stata files retain the ordinal information in the underlying data (:issue:`8836`). - Defined ``.size`` attribute across ``NDFrame`` objects to provide compat with numpy >= 1.9.1; buggy with ``np.array_split`` (:issue:`8846`) - Skip testing of histogram plots for matplotlib <= 1.2 (:issue:`8648`). 
@@ -230,11 +230,11 @@ Bug Fixes - Bug where index name was still used when plotting a series with ``use_index=False`` (:issue:`8558`). - Bugs when trying to stack multiple columns, when some (or all) of the level names are numbers (:issue:`8584`). - Bug in ``MultiIndex`` where ``__contains__`` returns wrong result if index is not lexically sorted or unique (:issue:`7724`) -- BUG CSV: fix problem with trailing whitespace in skipped rows, (:issue:`8679`), (:issue:`8661`), (:issue:`8983`) +- BUG CSV: fix problem with trailing white space in skipped rows, (:issue:`8679`), (:issue:`8661`), (:issue:`8983`) - Regression in ``Timestamp`` does not parse 'Z' zone designator for UTC (:issue:`8771`) - Bug in `StataWriter` the produces writes strings with 244 characters irrespective of actual size (:issue:`8969`) - Fixed ValueError raised by cummin/cummax when datetime64 Series contains NaT. (:issue:`8965`) -- Bug in Datareader returns object dtype if there are missing values (:issue:`8980`) +- Bug in DataReader returns object dtype if there are missing values (:issue:`8980`) - Bug in plotting if sharex was enabled and index was a timeseries, would show labels on multiple axes (:issue:`3964`). - Bug where passing a unit to the TimedeltaIndex constructor applied the to nano-second conversion twice. (:issue:`9011`). - Bug in plotting of a period-like array (:issue:`9012`) diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 48af06d124f2e..214a08ef0bbff 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -133,7 +133,7 @@ from a ``scipy.sparse.coo_matrix``: String Methods Enhancements ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- Following new methods are accesible via ``.str`` accessor to apply the function to each values. This is intended to make it more consistent with standard methods on strings. 
(:issue:`9282`, :issue:`9352`, :issue:`9386`, :issue:`9387`, :issue:`9439`) +- Following new methods are accessible via ``.str`` accessor to apply the function to each values. This is intended to make it more consistent with standard methods on strings. (:issue:`9282`, :issue:`9352`, :issue:`9386`, :issue:`9387`, :issue:`9439`) ============= ============= ============= =============== =============== .. .. Methods .. .. @@ -530,7 +530,7 @@ Deprecations We refer users to the external package `pandas-qt <https://github.com/datalyze-solutions/pandas-qt>`_. (:issue:`9615`) - The ``pandas.rpy`` interface is deprecated and will be removed in a future version. - Similar functionaility can be accessed thru the `rpy2 <http://rpy2.bitbucket.org/>`_ project (:issue:`9602`) + Similar functionality can be accessed through the `rpy2 <http://rpy2.bitbucket.org/>`_ project (:issue:`9602`) - Adding ``DatetimeIndex/PeriodIndex`` to another ``DatetimeIndex/PeriodIndex`` is being deprecated as a set-operation. This will be changed to a ``TypeError`` in a future version. ``.union()`` should be used for the union set operation. (:issue:`9094`) - Subtracting ``DatetimeIndex/PeriodIndex`` from another ``DatetimeIndex/PeriodIndex`` is being deprecated as a set-operation. This will be changed to an actual numeric subtraction yielding a ``TimeDeltaIndex`` in a future version. ``.difference()`` should be used for the differencing set operation. (:issue:`9094`) @@ -601,7 +601,7 @@ Bug Fixes - Bug in binary operator method (eg ``.mul()``) alignment with integer levels (:issue:`9463`). - Bug in boxplot, scatter and hexbin plot may show an unnecessary warning (:issue:`8877`) - Bug in subplot with ``layout`` kw may show unnecessary warning (:issue:`9464`) -- Bug in using grouper functions that need passed thru arguments (e.g. axis), when using wrapped function (e.g. ``fillna``), (:issue:`9221`) +- Bug in using grouper functions that need passed through arguments (e.g. 
axis), when using wrapped function (e.g. ``fillna``), (:issue:`9221`) - ``DataFrame`` now properly supports simultaneous ``copy`` and ``dtype`` arguments in constructor (:issue:`9099`) - Bug in ``read_csv`` when using skiprows on a file with CR line endings with the c engine. (:issue:`9079`) - ``isnull`` now detects ``NaT`` in ``PeriodIndex`` (:issue:`9129`) @@ -613,7 +613,7 @@ Bug Fixes - Fixed division by zero error for ``Series.kurt()`` when all values are equal (:issue:`9197`) - Fixed issue in the ``xlsxwriter`` engine where it added a default 'General' format to cells if no other format was applied. This prevented other row or column formatting being applied. (:issue:`9167`) - Fixes issue with ``index_col=False`` when ``usecols`` is also specified in ``read_csv``. (:issue:`9082`) -- Bug where ``wide_to_long`` would modify the input stubnames list (:issue:`9204`) +- Bug where ``wide_to_long`` would modify the input stub names list (:issue:`9204`) - Bug in ``to_sql`` not storing float64 values using double precision. (:issue:`9009`) - ``SparseSeries`` and ``SparsePanel`` now accept zero argument constructors (same as their non-sparse counterparts) (:issue:`9272`). - Regression in merging ``Categorical`` and ``object`` dtypes (:issue:`9426`) @@ -624,7 +624,7 @@ Bug Fixes - Fixed bug with reading CSV files from Amazon S3 on python 3 raising a TypeError (:issue:`9452`) - Bug in the Google BigQuery reader where the 'jobComplete' key may be present but False in the query results (:issue:`8728`) - Bug in ``Series.values_counts`` with excluding ``NaN`` for categorical type ``Series`` with ``dropna=True`` (:issue:`9443`) -- Fixed mising numeric_only option for ``DataFrame.std/var/sem`` (:issue:`9201`) +- Fixed missing numeric_only option for ``DataFrame.std/var/sem`` (:issue:`9201`) - Support constructing ``Panel`` or ``Panel4D`` with scalar data (:issue:`8285`) - ``Series`` text representation disconnected from `max_rows`/`max_columns` (:issue:`7508`). 
diff --git a/doc/source/whatsnew/v0.16.1.txt b/doc/source/whatsnew/v0.16.1.txt index 5c716f6ad45c1..e2da12fc94b58 100644 --- a/doc/source/whatsnew/v0.16.1.txt +++ b/doc/source/whatsnew/v0.16.1.txt @@ -133,7 +133,7 @@ groupby operations on the index will preserve the index nature as well reindexing operations, will return a resulting index based on the type of the passed indexer, meaning that passing a list will return a plain-old-``Index``; indexing with a ``Categorical`` will return a ``CategoricalIndex``, indexed according to the categories -of the PASSED ``Categorical`` dtype. This allows one to arbitrarly index these even with +of the PASSED ``Categorical`` dtype. This allows one to arbitrarily index these even with values NOT in the categories, similarly to how you can reindex ANY pandas index. .. code-block:: ipython @@ -237,7 +237,7 @@ enhancements make string operations easier and more consistent with standard pyt idx.str.startswith('a') s[s.index.str.startswith('a')] -- The following new methods are accesible via ``.str`` accessor to apply the function to each values. (:issue:`9766`, :issue:`9773`, :issue:`10031`, :issue:`10045`, :issue:`10052`) +- The following new methods are accessible via ``.str`` accessor to apply the function to each value. (:issue:`9766`, :issue:`9773`, :issue:`10031`, :issue:`10045`, :issue:`10052`) ================ =============== =============== =============== ================ .. .. Methods .. .. @@ -348,7 +348,7 @@ Deprecations Index Representation ~~~~~~~~~~~~~~~~~~~~ -The string representation of ``Index`` and its sub-classes have now been unified. These will show a single-line display if there are few values; a wrapped multi-line display for a lot of values (but less than ``display.max_seq_items``; if lots of items (> ``display.max_seq_items``) will show a truncated display (the head and tail of the data). The formatting for ``MultiIndex`` is unchanges (a multi-line wrapped display).
The display width responds to the option ``display.max_seq_items``, which is defaulted to 100. (:issue:`6482`) +The string representation of ``Index`` and its sub-classes have now been unified. These will show a single-line display if there are few values; a wrapped multi-line display for a lot of values (but less than ``display.max_seq_items``; if lots of items (> ``display.max_seq_items``) will show a truncated display (the head and tail of the data). The formatting for ``MultiIndex`` is unchanged (a multi-line wrapped display). The display width responds to the option ``display.max_seq_items``, which is defaulted to 100. (:issue:`6482`) Previous Behavior @@ -437,8 +437,8 @@ Bug Fixes - Bug in ``to_msgpack`` and ``read_msgpack`` zlib and blosc compression support (:issue:`9783`) - Bug ``GroupBy.size`` doesn't attach index name properly if grouped by ``TimeGrouper`` (:issue:`9925`) - Bug causing an exception in slice assignments because ``length_of_indexer`` returns wrong results (:issue:`9995`) -- Bug in csv parser causing lines with initial whitespace plus one non-space character to be skipped. (:issue:`9710`) -- Bug in C csv parser causing spurious NaNs when data started with newline followed by whitespace. (:issue:`10022`) +- Bug in csv parser causing lines with initial white space plus one non-space character to be skipped. (:issue:`9710`) +- Bug in C csv parser causing spurious NaNs when data started with newline followed by white space. 
(:issue:`10022`) - Bug causing elements with a null group to spill into the final group when grouping by a ``Categorical`` (:issue:`9603`) - Bug where .iloc and .loc behavior is not consistent on empty dataframes (:issue:`9964`) - Bug in invalid attribute access on a ``TimedeltaIndex`` incorrectly raised ``ValueError`` instead of ``AttributeError`` (:issue:`9680`) diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt index 29f6832b48aaf..047da4c94093b 100644 --- a/doc/source/whatsnew/v0.16.2.txt +++ b/doc/source/whatsnew/v0.16.2.txt @@ -125,7 +125,7 @@ Bug Fixes - Bug where ``HDFStore.select`` modifies the passed columns list (:issue:`7212`) - Bug in ``Categorical`` repr with ``display.width`` of ``None`` in Python 3 (:issue:`10087`) - Bug in ``to_json`` with certain orients and a ``CategoricalIndex`` would segfault (:issue:`10317`) -- Bug where some of the nan funcs do not have consistent return dtypes (:issue:`10251`) +- Bug where some of the nan functions do not have consistent return dtypes (:issue:`10251`) - Bug in ``DataFrame.quantile`` on checking that a valid axis was passed (:issue:`9543`) - Bug in ``groupby.apply`` aggregation for ``Categorical`` not preserving categories (:issue:`10138`) - Bug in ``to_csv`` where ``date_format`` is ignored if the ``datetime`` is fractional (:issue:`10209`) @@ -155,7 +155,7 @@ Bug Fixes - Bug in ``GroupBy.get_group`` raises ``ValueError`` when group key contains ``NaT`` (:issue:`6992`) - Bug in ``SparseSeries`` constructor ignores input data name (:issue:`10258`) - Bug in ``Categorical.remove_categories`` causing a ``ValueError`` when removing the ``NaN`` category if underlying dtype is floating-point (:issue:`10156`) -- Bug where infer_freq infers timerule (WOM-5XXX) unsupported by to_offset (:issue:`9425`) +- Bug where infer_freq infers time rule (WOM-5XXX) unsupported by to_offset (:issue:`9425`) - Bug in ``DataFrame.to_hdf()`` where table format would raise a seemingly unrelated error for 
invalid (non-string) column names. This is now explicitly forbidden. (:issue:`9057`) - Bug to handle masking empty ``DataFrame`` (:issue:`10126`). - Bug where MySQL interface could not handle numeric table/column names (:issue:`10255`) diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index ec8f318b72fef..1b98ebd0e19c5 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -308,7 +308,7 @@ See the :ref:`documentation <io.excel>` for more details. os.remove('test.xlsx') Previously, it was necessary to specify the ``has_index_names`` argument in ``read_excel``, -if the serialized data had index names. For version 0.17.0 the ouptput format of ``to_excel`` +if the serialized data had index names. For version 0.17.0 the output format of ``to_excel`` has been changed to make this keyword unnecessary - the change is shown below. **Old** @@ -1042,7 +1042,7 @@ Performance Improvements Bug Fixes ~~~~~~~~~ -- Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`) +- Bug in incorrect computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`) - Bug in ``.isin`` on older numpies (:issue:`11232`) - Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) - Bug in ``DataFrame.to_latex()`` the ``column_format`` argument could not be passed (:issue:`9402`) diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt index e1b561c4deacb..990f27950d982 100644 --- a/doc/source/whatsnew/v0.17.1.txt +++ b/doc/source/whatsnew/v0.17.1.txt @@ -41,7 +41,7 @@ Conditional HTML Formatting We've added *experimental* support for conditional HTML formatting: the visual styling of a DataFrame based on the data. The styling is accomplished with HTML and CSS. 
-Acesses the styler class with the :attr:`pandas.DataFrame.style`, attribute, +Accesses the styler class with the :attr:`pandas.DataFrame.style`, attribute, an instance of :class:`~pandas.core.style.Styler` with your data attached. Here's a quick example: diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt index bfd314639aa60..8dc49dbc319a6 100644 --- a/doc/source/whatsnew/v0.18.0.txt +++ b/doc/source/whatsnew/v0.18.0.txt @@ -330,7 +330,7 @@ Timedeltas t[0].round('2h') -In addition, ``.round()``, ``.floor()`` and ``.ceil()`` will be available thru the ``.dt`` accessor of ``Series``. +In addition, ``.round()``, ``.floor()`` and ``.ceil()`` will be available through the ``.dt`` accessor of ``Series``. .. ipython:: python @@ -414,7 +414,7 @@ New Behavior: df.loc[ix, 'b'] = df.loc[ix, 'b'] df.dtypes -When a DataFrame's integer slice is partially updated with a new slice of floats that could potentially be downcasted to integer without losing precision, the dtype of the slice will be set to float instead of integer. +When a DataFrame's integer slice is partially updated with a new slice of floats that could potentially be down-casted to integer without losing precision, the dtype of the slice will be set to float instead of integer. Previous Behavior: @@ -516,19 +516,19 @@ Other enhancements - ``Series`` gained an ``is_unique`` attribute (:issue:`11946`) - ``DataFrame.quantile`` and ``Series.quantile`` now accept ``interpolation`` keyword (:issue:`10174`). - Added ``DataFrame.style.format`` for more flexible formatting of cell values (:issue:`11692`) -- ``DataFrame.select_dtypes`` now allows the ``np.float16`` typecode (:issue:`11990`) +- ``DataFrame.select_dtypes`` now allows the ``np.float16`` type code (:issue:`11990`) - ``pivot_table()`` now accepts most iterables for the ``values`` parameter (:issue:`12017`) - Added Google ``BigQuery`` service account authentication support, which enables authentication on remote servers. 
(:issue:`11881`, :issue:`12572`). For further details see `here <https://pandas-gbq.readthedocs.io/en/latest/intro.html>`__ - ``HDFStore`` is now iterable: ``for k in store`` is equivalent to ``for k in store.keys()`` (:issue:`12221`). - Add missing methods/fields to ``.dt`` for ``Period`` (:issue:`8848`) -- The entire codebase has been ``PEP``-ified (:issue:`12096`) +- The entire code base has been ``PEP``-ified (:issue:`12096`) .. _whatsnew_0180.api_breaking: Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- the leading whitespaces have been removed from the output of ``.to_string(index=False)`` method (:issue:`11833`) +- the leading white spaces have been removed from the output of ``.to_string(index=False)`` method (:issue:`11833`) - the ``out`` parameter has been removed from the ``Series.round()`` method. (:issue:`11763`) - ``DataFrame.round()`` leaves non-numeric columns unchanged in its return, rather than raises. (:issue:`11885`) - ``DataFrame.head(0)`` and ``DataFrame.tail(0)`` return empty frames, rather than ``self``. (:issue:`11937`) @@ -1186,7 +1186,7 @@ Performance Improvements - Improved performance in construction of ``Categoricals`` with ``Series`` of datetimes containing ``NaT`` (:issue:`12077`) -- Improved performance of ISO 8601 date parsing for dates without separators (:issue:`11899`), leading zeros (:issue:`11871`) and with whitespace preceding the time zone (:issue:`9714`) +- Improved performance of ISO 8601 date parsing for dates without separators (:issue:`11899`), leading zeros (:issue:`11871`) and with white space preceding the time zone (:issue:`9714`) diff --git a/doc/source/whatsnew/v0.18.1.txt b/doc/source/whatsnew/v0.18.1.txt index de9a5d5d8afae..34921505a46bf 100644 --- a/doc/source/whatsnew/v0.18.1.txt +++ b/doc/source/whatsnew/v0.18.1.txt @@ -106,7 +106,7 @@ Now you can do: .. 
_whatsnew_0181.enhancements.method_chain: -Method chaininng improvements +Method chaining improvements ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The following methods / indexers now accept a ``callable``. It is intended to make @@ -598,14 +598,14 @@ Bug Fixes - Bug in ``.resample(...)`` with a ``PeriodIndex`` when resampling to an existing frequency (:issue:`12770`) - Bug in printing data which contains ``Period`` with different ``freq`` raises ``ValueError`` (:issue:`12615`) - Bug in ``Series`` construction with ``Categorical`` and ``dtype='category'`` is specified (:issue:`12574`) -- Bugs in concatenation with a coercable dtype was too aggressive, resulting in different dtypes in outputformatting when an object was longer than ``display.max_rows`` (:issue:`12411`, :issue:`12045`, :issue:`11594`, :issue:`10571`, :issue:`12211`) +- Bugs in concatenation with a coercible dtype was too aggressive, resulting in different dtypes in output formatting when an object was longer than ``display.max_rows`` (:issue:`12411`, :issue:`12045`, :issue:`11594`, :issue:`10571`, :issue:`12211`) - Bug in ``float_format`` option with option not being validated as a callable. 
(:issue:`12706`) - Bug in ``GroupBy.filter`` when ``dropna=False`` and no groups fulfilled the criteria (:issue:`12768`) - Bug in ``__name__`` of ``.cum*`` functions (:issue:`12021`) - Bug in ``.astype()`` of a ``Float64Inde/Int64Index`` to an ``Int64Index`` (:issue:`12881`) -- Bug in roundtripping an integer based index in ``.to_json()/.read_json()`` when ``orient='index'`` (the default) (:issue:`12866`) +- Bug in round tripping an integer based index in ``.to_json()/.read_json()`` when ``orient='index'`` (the default) (:issue:`12866`) - Bug in plotting ``Categorical`` dtypes cause error when attempting stacked bar plot (:issue:`13019`) -- Compat with >= ``numpy`` 1.11 for ``NaT`` comparions (:issue:`12969`) +- Compat with >= ``numpy`` 1.11 for ``NaT`` comparisons (:issue:`12969`) - Bug in ``.drop()`` with a non-unique ``MultiIndex``. (:issue:`12701`) - Bug in ``.concat`` of datetime tz-aware and naive DataFrames (:issue:`12467`) - Bug in correctly raising a ``ValueError`` in ``.resample(..).fillna(..)`` when passing a non-string (:issue:`12952`) @@ -673,7 +673,7 @@ Bug Fixes - Bug in ``pd.concat`` raises ``AttributeError`` when input data contains tz-aware datetime and timedelta (:issue:`12620`) - Bug in ``pd.concat`` did not handle empty ``Series`` properly (:issue:`11082`) -- Bug in ``.plot.bar`` alginment when ``width`` is specified with ``int`` (:issue:`12979`) +- Bug in ``.plot.bar`` alignment when ``width`` is specified with ``int`` (:issue:`12979`) - Bug in ``fill_value`` is ignored if the argument to a binary operator is a constant (:issue:`12723`) diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt index 50d7877a9cd48..73fb124afef87 100644 --- a/doc/source/whatsnew/v0.19.0.txt +++ b/doc/source/whatsnew/v0.19.0.txt @@ -386,7 +386,7 @@ Google BigQuery Enhancements Fine-grained numpy errstate ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Previous versions of pandas would permanently silence numpy's ufunc error handling when ``pandas`` was 
imported. Pandas did this in order to silence the warnings that would arise from using numpy ufuncs on missing data, which are usually represented as ``NaN`` s. Unfortunately, this silenced legitimate warnings arising in non-pandas code in the application. Starting with 0.19.0, pandas will use the ``numpy.errstate`` context manager to silence these warnings in a more fine-grained manner, only around where these operations are actually used in the pandas codebase. (:issue:`13109`, :issue:`13145`) +Previous versions of pandas would permanently silence numpy's ufunc error handling when ``pandas`` was imported. Pandas did this in order to silence the warnings that would arise from using numpy ufuncs on missing data, which are usually represented as ``NaN`` s. Unfortunately, this silenced legitimate warnings arising in non-pandas code in the application. Starting with 0.19.0, pandas will use the ``numpy.errstate`` context manager to silence these warnings in a more fine-grained manner, only around where these operations are actually used in the pandas code base. (:issue:`13109`, :issue:`13145`) After upgrading pandas, you may see *new* ``RuntimeWarnings`` being issued from your code. These are likely legitimate, and the underlying cause likely existed in the code when using previous versions of pandas that simply silenced the warning. Use `numpy.errstate <http://docs.scipy.org/doc/numpy/reference/generated/numpy.errstate.html>`__ around the source of the ``RuntimeWarning`` to control how these conditions are handled. @@ -750,7 +750,7 @@ This will now convert integers/floats with the default unit of ``ns``. Bug fixes related to ``.to_datetime()``: - Bug in ``pd.to_datetime()`` when passing integers or floats, and no ``unit`` and ``errors='coerce'`` (:issue:`13180`). -- Bug in ``pd.to_datetime()`` when passing invalid datatypes (e.g. bool); will now respect the ``errors`` keyword (:issue:`13176`) +- Bug in ``pd.to_datetime()`` when passing invalid data types (e.g. 
bool); will now respect the ``errors`` keyword (:issue:`13176`) - Bug in ``pd.to_datetime()`` which overflowed on ``int8``, and ``int16`` dtypes (:issue:`13451`) - Bug in ``pd.to_datetime()`` raise ``AttributeError`` with ``NaN`` and the other string is not valid when ``errors='ignore'`` (:issue:`12424`) - Bug in ``pd.to_datetime()`` did not cast floats correctly when ``unit`` was specified, resulting in truncated datetime (:issue:`13834`) @@ -1512,7 +1512,7 @@ Bug Fixes - Bug in ``.set_index`` raises ``AmbiguousTimeError`` if new index contains DST boundary and multi levels (:issue:`12920`) - Bug in ``.shift`` raises ``AmbiguousTimeError`` if data contains datetime near DST boundary (:issue:`13926`) - Bug in ``pd.read_hdf()`` returns incorrect result when a ``DataFrame`` with a ``categorical`` column and a query which doesn't match any values (:issue:`13792`) -- Bug in ``.iloc`` when indexing with a non lex-sorted MultiIndex (:issue:`13797`) +- Bug in ``.iloc`` when indexing with a non lexsorted MultiIndex (:issue:`13797`) - Bug in ``.loc`` when indexing with date strings in a reverse sorted ``DatetimeIndex`` (:issue:`14316`) - Bug in ``Series`` comparison operators when dealing with zero dim NumPy arrays (:issue:`13006`) - Bug in ``.combine_first`` may return incorrect ``dtype`` (:issue:`7630`, :issue:`10567`) diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt index b8afe18e0f871..1c577dddf1cd4 100644 --- a/doc/source/whatsnew/v0.19.1.txt +++ b/doc/source/whatsnew/v0.19.1.txt @@ -43,7 +43,7 @@ Bug Fixes - Bug in localizing an ambiguous timezone when a boolean is passed (:issue:`14402`) - Bug in ``TimedeltaIndex`` addition with a Datetime-like object where addition overflow in the negative direction was not being caught (:issue:`14068`, :issue:`14453`) - Bug in string indexing against data with ``object`` ``Index`` may raise ``AttributeError`` (:issue:`14424`) -- Corrrecly raise ``ValueError`` on empty input to ``pd.eval()`` and 
``df.query()`` (:issue:`13139`) +- Correctly raise ``ValueError`` on empty input to ``pd.eval()`` and ``df.query()`` (:issue:`13139`) - Bug in ``RangeIndex.intersection`` when result is a empty set (:issue:`14364`). - Bug in groupby-transform broadcasting that could cause incorrect dtype coercion (:issue:`14457`) - Bug in ``Series.__setitem__`` which allowed mutating read-only arrays (:issue:`14359`). diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 5fb725a76770e..bd90e371597dc 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -24,7 +24,7 @@ Highlights include: .. warning:: - Pandas has changed the internal structure and layout of the codebase. + Pandas has changed the internal structure and layout of the code base. This can affect imports that are not from the top-level ``pandas.*`` namespace, please see the changes :ref:`here <whatsnew_0200.privacy>`. Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations <whatsnew_0200.deprecations>` before updating. @@ -324,7 +324,7 @@ this JSON Table schema representation of the Series or DataFrame if you are using IPython (or another frontend like `nteract`_ using the Jupyter messaging protocol). This gives frontends like the Jupyter notebook and `nteract`_ -more flexiblity in how they display pandas objects, since they have +more flexibility in how they display pandas objects, since they have more information about the data. You must enable this by setting the ``display.html.table_schema`` option to ``True``. @@ -462,7 +462,7 @@ Selecting via a scalar value that is contained *in* the intervals. Other Enhancements ^^^^^^^^^^^^^^^^^^ -- ``DataFrame.rolling()`` now accepts the parameter ``closed='right'|'left'|'both'|'neither'`` to choose the rolling window-endpoint closedness. 
See the :ref:`documentation <stats.rolling_window.endpoints>` (:issue:`13965`) +- ``DataFrame.rolling()`` now accepts the parameter ``closed='right'|'left'|'both'|'neither'`` to choose the rolling window-endpoint closedness. See the :ref:`documentation <stats.rolling_window.endpoints>` (:issue:`13965`) +- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`. - ``Series.str.replace()`` now accepts a callable, as replacement, which is passed to ``re.sub`` (:issue:`15055`) - ``Series.str.replace()`` now accepts a compiled regular expression as a pattern (:issue:`15446`) @@ -1389,7 +1389,7 @@ list, and a dict of column names to scalars or lists. This provides a useful syn (potentially different) aggregations. However, ``.agg(..)`` can *also* accept a dict that allows 'renaming' of the result columns. This is a complicated and confusing syntax, as well as not consistent -between ``Series`` and ``DataFrame``. We are deprecating this 'renaming' functionaility. +between ``Series`` and ``DataFrame``. We are deprecating this 'renaming' functionality. - We are deprecating passing a dict to a grouped/rolled/resampled ``Series``. This allowed one to ``rename`` the resulting aggregation, but this had a completely different @@ -1528,7 +1528,7 @@ Removal of prior version deprecations/changes - The ``pandas.io.ga`` module with a ``google-analytics`` interface is removed (:issue:`11308`). Similar functionality can be found in the `Google2Pandas <https://github.com/panalysis/Google2Pandas>`__ package. - ``pd.to_datetime`` and ``pd.to_timedelta`` have dropped the ``coerce`` parameter in favor of ``errors`` (:issue:`13602`) -- ``pandas.stats.fama_macbeth``, ``pandas.stats.ols``, ``pandas.stats.plm`` and ``pandas.stats.var``, as well as the top-level ``pandas.fama_macbeth`` and ``pandas.ols`` routines are removed.
Similar functionaility can be found in the `statsmodels <shttp://www.statsmodels.org/dev/>`__ package. (:issue:`11898`) +- ``pandas.stats.fama_macbeth``, ``pandas.stats.ols``, ``pandas.stats.plm`` and ``pandas.stats.var``, as well as the top-level ``pandas.fama_macbeth`` and ``pandas.ols`` routines are removed. Similar functionality can be found in the `statsmodels <http://www.statsmodels.org/dev/>`__ package. (:issue:`11898`) - The ``TimeSeries`` and ``SparseTimeSeries`` classes, aliases of ``Series`` and ``SparseSeries``, are removed (:issue:`10890`, :issue:`15098`). - ``Series.is_time_series`` is dropped in favor of ``Series.index.is_all_dates`` (:issue:`15098`) @@ -1640,7 +1640,7 @@ I/O - Bug in ``pd.read_csv()`` in which missing data was being improperly handled with ``usecols`` (:issue:`6710`) - Bug in ``pd.read_csv()`` in which a file containing a row with many columns followed by rows with fewer columns would cause a crash (:issue:`14125`) - Bug in ``pd.read_csv()`` for the C engine where ``usecols`` were being indexed incorrectly with ``parse_dates`` (:issue:`14792`) -- Bug in ``pd.read_csv()`` with ``parse_dates`` when multiline headers are specified (:issue:`15376`)
in ``.eval()`` which caused multiline evals to fail with local variables not on the first line (:issue:`15342`) +- Bug in ``.eval()`` which caused multi-line evals to fail with local variables not on the first line (:issue:`15342`) Other ^^^^^ diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 2e9e616daf3a7..2c147736d79a8 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -124,7 +124,7 @@ I/O - Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) - Bug in parsing integer datetime-like columns with specified format in ``read_sql`` (:issue:`17855`). - Bug in :meth:`DataFrame.to_msgpack` when serializing data of the ``numpy.bool_`` datatype (:issue:`18390`) -- Bug in :func:`read_json` not decoding when reading line deliminted JSON from S3 (:issue:`17200`) +- Bug in :func:`read_json` not decoding when reading line delimited JSON from S3 (:issue:`17200`) - Bug in :func:`pandas.io.json.json_normalize` to avoid modification of ``meta`` (:issue:`18610`) - Bug in :func:`to_latex` where repeated multi-index values were not printed even though a higher level index differed from the previous row (:issue:`14484`) - Bug when reading NaN-only categorical columns in :class:`HDFStore` (:issue:`18413`) @@ -139,7 +139,7 @@ Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug in ``DataFrame.resample(...).apply(...)`` when there is a callable that returns different columns (:issue:`15169`) -- Bug in ``DataFrame.resample(...)`` when there is a time change (DST) and resampling frequecy is 12h or higher (:issue:`15549`) +- Bug in ``DataFrame.resample(...)`` when there is a time change (DST) and resampling frequency is 12h or higher (:issue:`15549`) - Bug in ``pd.DataFrameGroupBy.count()`` when counting over a datetimelike column (:issue:`13393`) - Bug in ``rolling.var`` where calculation is inaccurate with a zero-valued array 
(:issue:`18430`) diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a099fb40c35a7..2430b6ac2bbd4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -243,7 +243,7 @@ Grouping by a categorical includes the unobserved categories in the output. When grouping by multiple categorical columns, this means you get the cartesian product of all the categories, including combinations where there are no observations, which can result in a large number of groups. We have added a keyword ``observed`` to control this behavior, it defaults to -``observed=False`` for backward-compatiblity. (:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`, :issue:`20902`) +``observed=False`` for backward-compatibility. (:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`, :issue:`20902`) .. ipython:: python @@ -535,8 +535,8 @@ Other Enhancements - :func:`DataFrame.replace` now supports the ``method`` parameter, which can be used to specify the replacement method when ``to_replace`` is a scalar, list or tuple and ``value`` is ``None`` (:issue:`19632`) - :meth:`Timestamp.month_name`, :meth:`DatetimeIndex.month_name`, and :meth:`Series.dt.month_name` are now available (:issue:`12805`) - :meth:`Timestamp.day_name` and :meth:`DatetimeIndex.day_name` are now available to return day names with a specified locale (:issue:`12806`) -- :meth:`DataFrame.to_sql` now performs a multivalue insert if the underlying connection supports itk rather than inserting row by row. - ``SQLAlchemy`` dialects supporting multivalue inserts include: ``mysql``, ``postgresql``, ``sqlite`` and any dialect with ``supports_multivalues_insert``. (:issue:`14315`, :issue:`8953`) +- :meth:`DataFrame.to_sql` now performs a multi-value insert if the underlying connection supports it rather than inserting row by row.
+ ``SQLAlchemy`` dialects supporting multi-value inserts include: ``mysql``, ``postgresql``, ``sqlite`` and any dialect with ``supports_multivalues_insert``. (:issue:`14315`, :issue:`8953`) - :func:`read_html` now accepts a ``displayed_only`` keyword argument to controls whether or not hidden elements are parsed (``True`` by default) (:issue:`20027`) - :func:`read_html` now reads all ``<tbody>`` elements in a ``<table>``, not just the first. (:issue:`20690`) - :meth:`~pandas.core.window.Rolling.quantile` and :meth:`~pandas.core.window.Expanding.quantile` now accept the ``interpolation`` keyword, ``linear`` by default (:issue:`20497`) @@ -836,7 +836,7 @@ Extraction of matching patterns from strings By default, extracting matching patterns from strings with :func:`str.extract` used to return a ``Series`` if a single group was being extracted (a ``DataFrame`` if more than one group was extracted). As of Pandas 0.23.0 :func:`str.extract` always returns a ``DataFrame``, unless -``expand`` is set to ``False``. Finallay, ``None`` was an accepted value for +``expand`` is set to ``False``. Finally, ``None`` was an accepted value for the ``expand`` parameter (which was equivalent to ``False``), but now raises a ``ValueError``. (:issue:`11386`) Previous Behavior: @@ -896,7 +896,7 @@ New Behavior: Notice in the example above that the converted ``Categorical`` has retained ``ordered=True``. Had the default value for ``ordered`` remained as ``False``, the converted ``Categorical`` would have become unordered, despite ``ordered=False`` never being explicitly specified. To change the value of ``ordered``, explicitly pass it to the new dtype, e.g. ``CategoricalDtype(categories=list('cbad'), ordered=False)``. -Note that the unintenional conversion of ``ordered`` discussed above did not arise in previous versions due to separate bugs that prevented ``astype`` from doing any type of category to category conversion (:issue:`10696`, :issue:`18593`). 
These bugs have been fixed in this release, and motivated changing the default value of ``ordered``. +Note that the unintentional conversion of ``ordered`` discussed above did not arise in previous versions due to separate bugs that prevented ``astype`` from doing any type of category to category conversion (:issue:`10696`, :issue:`18593`). These bugs have been fixed in this release, and motivated changing the default value of ``ordered``. .. _whatsnew_0230.api_breaking.pretty_printing: @@ -1107,7 +1107,7 @@ Performance Improvements - Improved performance of :func:`pandas.core.groupby.GroupBy.any` and :func:`pandas.core.groupby.GroupBy.all` (:issue:`15435`) - Improved performance of :func:`pandas.core.groupby.GroupBy.pct_change` (:issue:`19165`) - Improved performance of :func:`Series.isin` in the case of categorical dtypes (:issue:`20003`) -- Improved performance of ``getattr(Series, attr)`` when the Series has certain index types. This manifiested in slow printing of large Series with a ``DatetimeIndex`` (:issue:`19764`) +- Improved performance of ``getattr(Series, attr)`` when the Series has certain index types. 
This manifested in slow printing of large Series with a ``DatetimeIndex`` (:issue:`19764`) - Fixed a performance regression for :func:`GroupBy.nth` and :func:`GroupBy.last` with some object columns (:issue:`19283`) - Improved performance of :func:`pandas.core.arrays.Categorical.from_codes` (:issue:`18501`) @@ -1243,7 +1243,7 @@ Offsets - Bug in :class:`WeekOfMonth` and :class:`LastWeekOfMonth` where default keyword arguments for constructor raised ``ValueError`` (:issue:`19142`) - Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`) - Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`) -- Bug in :class:`FY5253` where date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`) +- Bug in :class:`FY5253` where date offsets could incorrectly raise an ``AssertionError`` in arithmetic operations (:issue:`14774`) Numeric @@ -1329,9 +1329,9 @@ I/O - :class:`Timedelta` now supported in :func:`DataFrame.to_excel` for all Excel file types (:issue:`19242`, :issue:`9155`, :issue:`19900`) - Bug in :meth:`pandas.io.stata.StataReader.value_labels` raising an ``AttributeError`` when called on very old files. Now returns an empty dict (:issue:`19417`) - Bug in :func:`read_pickle` when unpickling objects with :class:`TimedeltaIndex` or :class:`Float64Index` created with pandas prior to version 0.20 (:issue:`19939`) -- Bug in :meth:`pandas.io.json.json_normalize` where subrecords are not properly normalized if any subrecords values are NoneType (:issue:`20030`) +- Bug in :meth:`pandas.io.json.json_normalize` where sub-records are not properly normalized if any sub-records values are NoneType (:issue:`20030`) - Bug in ``usecols`` parameter in :func:`read_csv` where error is not raised correctly when passing a string. 
(:issue:`20529`) -- Bug in :func:`HDFStore.keys` when reading a file with a softlink causes exception (:issue:`20523`) +- Bug in :func:`HDFStore.keys` when reading a file with a soft link causes exception (:issue:`20523`) - Bug in :func:`HDFStore.select_column` where a key which is not a valid store raised an ``AttributeError`` instead of a ``KeyError`` (:issue:`17912`) Plotting @@ -1390,7 +1390,7 @@ Reshaping - Bug in :func:`DataFrame.merge` in which merging using ``Index`` objects as vectors raised an Exception (:issue:`19038`) - Bug in :func:`DataFrame.stack`, :func:`DataFrame.unstack`, :func:`Series.unstack` which were not returning subclasses (:issue:`15563`) - Bug in timezone comparisons, manifesting as a conversion of the index to UTC in ``.concat()`` (:issue:`18523`) -- Bug in :func:`concat` when concatting sparse and dense series it returns only a ``SparseDataFrame``. Should be a ``DataFrame``. (:issue:`18914`, :issue:`18686`, and :issue:`16874`) +- Bug in :func:`concat` when concatenating sparse and dense series it returns only a ``SparseDataFrame``. Should be a ``DataFrame``. 
(:issue:`18914`, :issue:`18686`, and :issue:`16874`) - Improved error message for :func:`DataFrame.merge` when there is no common merge key (:issue:`19427`) - Bug in :func:`DataFrame.join` which does an ``outer`` instead of a ``left`` join when being called with multiple DataFrames and some have non-unique indices (:issue:`19624`) - :func:`Series.rename` now accepts ``axis`` as a kwarg (:issue:`18589`) @@ -1411,5 +1411,5 @@ Other ^^^^^ - Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`) -- Bug in accessing a :func:`pandas.get_option`, which raised ``KeyError`` rather than ``OptionError`` when looking up a non-existant option key in some cases (:issue:`19789`) +- Bug in accessing a :func:`pandas.get_option`, which raised ``KeyError`` rather than ``OptionError`` when looking up a non-existent option key in some cases (:issue:`19789`) - Bug in :func:`testing.assert_series_equal` and :func:`testing.assert_frame_equal` for Series or DataFrames with differing unicode data (:issue:`20503`) diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index b94377af770f4..1626508c3ba31 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -64,7 +64,7 @@ Performance Improvements Documentation Changes ~~~~~~~~~~~~~~~~~~~~~ -- +- Added sphinx spelling extension, updated documentation on how to use the spell check (:issue:`21079`) - - diff --git a/doc/source/whatsnew/v0.6.0.txt b/doc/source/whatsnew/v0.6.0.txt index 55a67a75e0fd1..bd01dd0a90a59 100644 --- a/doc/source/whatsnew/v0.6.0.txt +++ b/doc/source/whatsnew/v0.6.0.txt @@ -43,7 +43,7 @@ New Features Performance Enhancements ~~~~~~~~~~~~~~~~~~~~~~~~ -- VBENCH Cythonized ``cache_readonly``, resulting in substantial micro-performance enhancements throughout the codebase (:issue:`361`) +- VBENCH Cythonized ``cache_readonly``, resulting in substantial micro-performance enhancements throughout the code 
base (:issue:`361`) - VBENCH Special Cython matrix iterator for applying arbitrary reduction operations with 3-5x better performance than `np.apply_along_axis` (:issue:`309`) - VBENCH Improved performance of ``MultiIndex.from_tuples`` - VBENCH Special Cython matrix iterator for applying arbitrary reduction operations diff --git a/doc/source/whatsnew/v0.8.0.txt b/doc/source/whatsnew/v0.8.0.txt index b5ec5aa73ee9a..29d6fe563d047 100644 --- a/doc/source/whatsnew/v0.8.0.txt +++ b/doc/source/whatsnew/v0.8.0.txt @@ -33,7 +33,7 @@ clear of NumPy 1.6's datetime64 API functions (though limited as they are) and only interact with this data using the interface that pandas provides. See the end of the 0.8.0 section for a "porting" guide listing potential issues -for users migrating legacy codebases from pandas 0.7 or earlier to 0.8.0. +for users migrating legacy code bases from pandas 0.7 or earlier to 0.8.0. Bug fixes to the 0.7.x series for legacy NumPy < 1.6 users will be provided as they arise. There will be no more further development in 0.7.x beyond bug @@ -68,7 +68,7 @@ Time series changes and improvements :ref:`time spans <timeseries.periods>` and performing **calendar logic**, including the `12 fiscal quarterly frequencies <timeseries.quarterly>`. This is a partial port of, and a substantial enhancement to, - elements of the scikits.timeseries codebase. Support for conversion between + elements of the scikits.timeseries code base. Support for conversion between PeriodIndex and DatetimeIndex - New Timestamp data type subclasses `datetime.datetime`, providing the same interface while enabling working with nanosecond-resolution data. Also @@ -76,7 +76,7 @@ Time series changes and improvements - Enhanced support for :ref:`time zones <timeseries.timezone>`. Add `tz_convert` and ``tz_lcoalize`` methods to TimeSeries and DataFrame. All timestamps are stored as UTC; Timestamps from DatetimeIndex objects with time - zone set will be localized to localtime. 
Time zone conversions are therefore + zone set will be localized to local time. Time zone conversions are therefore essentially free. User needs to know very little about pytz library now; only time zone names as as strings are required. Time zone-aware timestamps are equal if and only if their UTC timestamps match. Operations between time diff --git a/doc/source/whatsnew/v0.9.1.txt b/doc/source/whatsnew/v0.9.1.txt index e2d6d7a275086..1f58170b30244 100644 --- a/doc/source/whatsnew/v0.9.1.txt +++ b/doc/source/whatsnew/v0.9.1.txt @@ -8,7 +8,7 @@ v0.9.1 (November 14, 2012) -------------------------- -This is a bugfix release from 0.9.0 and includes several new features and +This is a bug fix release from 0.9.0 and includes several new features and enhancements along with a large number of bug fixes. The new features include by-column sort order for DataFrame and Series, improved NA handling for the rank method, masking functions for DataFrame, and intraday time-series filtering for diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 741e5553141f7..41047d9c25c22 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -304,7 +304,7 @@ cdef class IndexEngine: """ return an indexer suitable for takng from a non unique index return the labels in the same order ast the target and a missing indexer into the targets (which correspond - to the -1 indicies in the results """ + to the -1 indices in the results """ cdef: ndarray values, x diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index f7bb6c1dbb304..f93748a75e609 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -943,7 +943,7 @@ class Timedelta(_Timedelta): days, seconds, microseconds, milliseconds, minutes, hours, weeks : numeric, optional Values for construction in compat with datetime.timedelta. - np ints and floats will be coereced to python ints and floats. 
+ np ints and floats will be coerced to python ints and floats. Notes ----- diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 88bc497f9f22d..63520fdd74299 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -513,7 +513,7 @@ def _factorize_array(values, na_sentinel=-1, size_hint=None, See Also -------- pandas.cut : Discretize continuous-valued array. - pandas.unique : Find the unique valuse in an array. + pandas.unique : Find the unique values in an array. Examples -------- @@ -558,7 +558,7 @@ def _factorize_array(values, na_sentinel=-1, size_hint=None, [a, c] Categories (3, object): [a, b, c] - Notice that ``'b'`` is in ``uniques.categories``, desipite not being + Notice that ``'b'`` is in ``uniques.categories``, despite not being present in ``cat.values``. For all other pandas objects, an Index of the appropriate type is @@ -576,8 +576,8 @@ def _factorize_array(values, na_sentinel=-1, size_hint=None, @Substitution( values=dedent("""\ values : sequence - A 1-D seqeunce. Sequences that aren't pandas objects are - coereced to ndarrays before factorization. + A 1-D sequence. Sequences that aren't pandas objects are + coerced to ndarrays before factorization. """), order=dedent("""\ order @@ -1457,7 +1457,7 @@ def take(arr, indices, axis=0, allow_fill=False, fill_value=None): Parameters ---------- arr : sequence - Non array-likes (sequences without a dtype) are coereced + Non array-likes (sequences without a dtype) are coerced to an ndarray. indices : sequence of integers Indices to be taken. diff --git a/pandas/core/apply.py b/pandas/core/apply.py index ac173c5182bc7..27ac5038276d6 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -113,7 +113,7 @@ def get_result(self): if isinstance(self.f, compat.string_types): # Support for `frame.transform('method')` # Some methods (shift, etc.) require the axis argument, others - # don't, so inspect and insert if nescessary. + # don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f) sig = compat.signature(func) if 'axis' in sig.args: diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 1922801c30719..ce87c0a8b0c5a 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -491,7 +491,7 @@ def take(self, indices, allow_fill=False, fill_value=None): `fill_value`: a user-facing "boxed" scalar, and a low-level physical NA value. `fill_value` should be the user-facing version, and the implementation should handle translating that to the - physical version for processing the take if nescessary. + physical version for processing the take if necessary. Returns ------- @@ -510,7 +510,7 @@ def take(self, indices, allow_fill=False, fill_value=None): ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``, ``iloc``, when `indices` is a sequence of values. Additionally, it's called by :meth:`Series.reindex`, or any other method - that causes realignemnt, with a `fill_value`. + that causes realignment, with a `fill_value`. See Also -------- diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index abcb9ae3494b5..eff8c9b4f4cbf 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -51,7 +51,7 @@ _take_msg = textwrap.dedent("""\ Interpreting negative values in 'indexer' as missing values. - In the future, this will change to meaning positional indicies + In the future, this will change to meaning positional indices from the right. Use 'allow_fill=True' to retain the previous behavior and silence this @@ -1478,7 +1478,7 @@ def argsort(self, *args, **kwargs): # TODO(PY2): use correct signature # We have to do *args, **kwargs to avoid a a py2-only signature # issue since np.argsort differs from argsort. - """Return the indicies that would sort the Categorical. + """Return the indices that would sort the Categorical. 
Parameters ---------- diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index d9dc73434f5ac..ad4588f254174 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -33,7 +33,7 @@ def isna(obj): """ Detect missing values for an array-like object. - This function takes a scalar or array-like object and indictates + This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). @@ -52,7 +52,7 @@ def isna(obj): See Also -------- notna : boolean inverse of pandas.isna. - Series.isna : Detetct missing values in a Series. + Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. @@ -260,7 +260,7 @@ def notna(obj): """ Detect non-missing values for an array-like object. - This function takes a scalar or array-like object and indictates + This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). @@ -279,7 +279,7 @@ def notna(obj): See Also -------- isna : boolean inverse of pandas.notna. - Series.notna : Detetct valid values in a Series. + Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. Index.notna : Detect valid values in an Index. diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 77a67c048a48d..5e5cde05cafbc 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1136,7 +1136,7 @@ def to_gbq(self, destination_table, project_id, chunksize=None, Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False - Force Google BigQuery to reauthenticate the user. 
This is useful + Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: @@ -5922,7 +5922,7 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, -------- DataFrame.applymap: For elementwise operations DataFrame.aggregate: only perform aggregating type operations - DataFrame.transform: only perform transformating type operations + DataFrame.transform: only perform transforming type operations Examples -------- @@ -6565,7 +6565,7 @@ def cov(self, min_periods=None): See Also -------- pandas.Series.cov : compute covariance with another Series - pandas.core.window.EWM.cov: expoential weighted sample covariance + pandas.core.window.EWM.cov: exponential weighted sample covariance pandas.core.window.Expanding.cov : expanding sample covariance pandas.core.window.Rolling.cov : rolling sample covariance diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9e4eda1bc4dc7..38def81e73231 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3607,7 +3607,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, If desired, we can fill in the missing values using one of several options. - For example, to backpropagate the last valid value to fill the ``NaN`` + For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. >>> df2.reindex(date_index2, method='bfill') @@ -4541,7 +4541,7 @@ def as_matrix(self, columns=None): e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcase to int32. By numpy.find_common_type convention, mixing int64 and uint64 - will result in a flot64 dtype. + will result in a float64 dtype. This method is provided for backwards compatibility. Generally, it is recommended to use '.values'. 
@@ -4622,7 +4622,7 @@ def values(self): See Also -------- - pandas.DataFrame.index : Retrievie the index labels + pandas.DataFrame.index : Retrieve the index labels pandas.DataFrame.columns : Retrieving the column names """ self._consolidate_inplace() @@ -5702,7 +5702,7 @@ def bfill(self, axis=None, inplace=False, limit=None, downcast=None): the correct type for replacement. Compare the behavior of ``s.replace({'a': None})`` and - ``s.replace('a', None)`` to understand the pecularities + ``s.replace('a', None)`` to understand the peculiarities of the `to_replace` parameter: >>> s = pd.Series([10, 'a', 'a', 'b', 'a']) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index df7a5dc9dc173..3bc59157055ce 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2069,7 +2069,7 @@ def shift(self, periods=1, freq=None, axis=0): @Appender(_doc_template) def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, axis=0): - """Calcuate pct_change of each value to previous entry in group""" + """Calculate pct_change of each value to previous entry in group""" if freq is not None or axis != 0: return self.apply(lambda x: x.pct_change(periods=periods, fill_method=fill_method, diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index df39eb5fd8312..82147e3ad2f38 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2629,7 +2629,7 @@ def shift(self, periods=1, freq=None): def argsort(self, *args, **kwargs): """ - Return the integer indicies that would sort the index. + Return the integer indices that would sort the index. Parameters ---------- @@ -2641,7 +2641,7 @@ def argsort(self, *args, **kwargs): Returns ------- numpy.ndarray - Integer indicies that would sort the index if used as + Integer indices that would sort the index if used as an indexer. 
See also diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 83950f1d71633..bc4b729cbfe15 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -2046,7 +2046,7 @@ def normalize(self): """ Convert times to midnight. - The time component of the date-timeise converted to midnight i.e. + The time component of the date-time is converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fbcf06a28c1e5..a9c65b7c2c864 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2141,7 +2141,7 @@ def slice_locs(self, start=None, end=None, step=None, kind=None): Notes ----- - This method only works if the MultiIndex is properly lex-sorted. So, + This method only works if the MultiIndex is properly lexsorted. So, if only the first 2 levels of a 3-level MultiIndex are lexsorted, you can only pass two levels to ``.slice_locs``. diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index b9e8f9028dbf7..c163e3d53e634 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -587,7 +587,7 @@ def asfreq(self, freq=None, how='E'): 'S', 'START', or 'BEGIN' for start. Whether the elements should be aligned to the end or start within pa period. January 31st ('END') vs. - Janury 1st ('START') for example. + January 1st ('START') for example. Returns ------- diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 31c489e2f8941..e9b9a734ec5f5 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -761,7 +761,7 @@ def _interp_limit(invalid, fw_limit, bw_limit): """ # handle forward first; the backward direction is the same except # 1. operate on the reversed array - # 2. subtract the returned indicies from N - 1 + # 2. 
subtract the returned indices from N - 1 N = len(invalid) f_idx = set() b_idx = set() diff --git a/pandas/core/panel.py b/pandas/core/panel.py index fe46b8a66e5ef..c4aa471b8b944 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1405,7 +1405,7 @@ def _get_join_index(self, other, how): # miscellaneous data creation @staticmethod def _extract_axes(self, data, axes, **kwargs): - """ return a list of the axis indicies """ + """ return a list of the axis indices """ return [self._extract_axis(self, data, axis=i, **kwargs) for i, a in enumerate(axes)] @@ -1447,11 +1447,11 @@ def _homogenize_dict(self, frames, intersect=True, dtype=None): Returns ------- - dict of aligned results & indicies + dict of aligned results & indices """ result = dict() - # caller differs dict/ODict, presered type + # caller differs dict/ODict, preserved type if isinstance(frames, OrderedDict): result = OrderedDict() diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 0707cc756682e..0b0fcacc1bc48 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -425,7 +425,7 @@ def backfill(self, limit=None): appear (e.g., when the resampling frequency is higher than the original frequency). The backward fill will replace NaN values that appeared in the resampled data with the next value in the original sequence. - Missing values that existed in the orginal data will not be modified. + Missing values that existed in the original data will not be modified. Parameters ---------- @@ -529,7 +529,7 @@ def fillna(self, method, limit=None): appear (e.g., when the resampling frequency is higher than the original frequency). - Missing values that existed in the orginal data will + Missing values that existed in the original data will not be modified. 
Parameters diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index ce99d2f8c9a63..b3e3c52f6e363 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -166,7 +166,8 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): Wide panel to long format. Less flexible but more user-friendly than melt. With stubnames ['A', 'B'], this function expects to find one or more - group of columns with format Asuffix1, Asuffix2,..., Bsuffix1, Bsuffix2,... + group of columns with format + A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,... You specify what you want to call this suffix in the resulting long format with `j` (for example `j='year'`) @@ -185,7 +186,7 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): i : str or list-like Column(s) to use as id variable(s) j : str - The name of the subobservation variable. What you wish to name your + The name of the sub-observation variable. What you wish to name your suffix in the long format. sep : str, default "" A character indicating the separation of the variable names @@ -200,7 +201,7 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): numeric suffixes. Suffixes with no numbers could be specified with the negated character class '\\D+'. You can also further disambiguate suffixes, for example, if your wide variables are of the form - Aone, Btwo,.., and you have an unrelated column Arating, you can + A-one, B-two,.., and you have an unrelated column A-rating, you can ignore the last one by specifying `suffix='(!?one|two)'` .. versionadded:: 0.20.0 @@ -242,7 +243,7 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): 1 1980 0.997345 e 1.3 2 1980 0.282978 f 0.1 - With multuple id columns + With multiple id columns >>> df = pd.DataFrame({ ... 
'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 4d8897fb7c811..73aba4d4e044b 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -319,7 +319,7 @@ def merge_asof(left, right, on=None, - If True, allow matching with the same 'on' value (i.e. less-than-or-equal-to / greater-than-or-equal-to) - If False, don't match the same 'on' value - (i.e., stricly less-than / strictly greater-than) + (i.e., strictly less-than / strictly greater-than) direction : 'backward' (default), 'forward', or 'nearest' Whether to search for prior, subsequent, or closest matches. diff --git a/pandas/core/series.py b/pandas/core/series.py index 0e2ae22f35af7..c92825abf45a3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3088,7 +3088,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): -------- Series.map: For element-wise operations Series.agg: only perform aggregating type operations - Series.transform: only perform transformating type operations + Series.transform: only perform transforming type operations Examples -------- diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 81d775157cf62..cb1e8c067f537 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -207,7 +207,7 @@ def str_count(arr, pat, flags=0): Flags for the `re` module. For a complete list, `see here <https://docs.python.org/3/howto/regex.html#compilation-flags>`_. **kwargs - For compatability with other string methods. Not used. + For compatibility with other string methods. Not used. Returns ------- @@ -1358,7 +1358,7 @@ def str_split(arr, pat=None, n=None): Limit number of splits in output. ``None``, 0 and -1 will be interpreted as return all splits. expand : bool, default False - Expand the splitted strings into separate columns. + Expand the split strings into separate columns. * If ``True``, return DataFrame/MultiIndex expanding dimensionality. 
* If ``False``, return Series/Index, containing lists of strings. diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 1de43116d0b49..8ecb81397edb3 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -138,7 +138,7 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, as dateutil). Warning: yearfirst=True is not strict, but will prefer to parse - with year first (this is a known bug, based on dateutil beahavior). + with year first (this is a known bug, based on dateutil behavior). .. versionadded:: 0.16.1 @@ -181,8 +181,8 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, .. versionadded:: 0.20.0 cache : boolean, default False If True, use a cache of unique, converted dates to apply the datetime - conversion. May produce sigificant speed-up when parsing duplicate date - strings, especially ones with timezone offsets. + conversion. May produce significant speed-up when parsing duplicate + date strings, especially ones with timezone offsets. .. versionadded:: 0.23.0 diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index f876ceb8a26bf..5203cf036c146 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -60,7 +60,7 @@ class Styler(object): table_styles: list-like, default None list of {selector: (attr, value)} dicts; see Notes uuid: str, default None - a unique identifier to avoid CSS collisons; generated automatically + a unique identifier to avoid CSS collisions; generated automatically caption: str, default None caption to attach to the table @@ -79,7 +79,7 @@ class Styler(object): If using in the Jupyter notebook, Styler has defined a ``_repr_html_`` to automatically render itself. Otherwise call Styler.render to get - the genterated HTML. + the generated HTML. 
CSS classes are attached to the generated HTML @@ -120,7 +120,7 @@ def __init__(self, data, precision=None, table_styles=None, uuid=None, if data.ndim == 1: data = data.to_frame() if not data.index.is_unique or not data.columns.is_unique: - raise ValueError("style is not supported for non-unique indicies.") + raise ValueError("style is not supported for non-unique indices.") self.data = data self.index = data.index @@ -549,7 +549,7 @@ def _apply(self, func, axis=0, subset=None, **kwargs): def apply(self, func, axis=0, subset=None, **kwargs): """ - Apply a function column-wise, row-wise, or table-wase, + Apply a function column-wise, row-wise, or table-wise, updating the HTML representation with the result. Parameters @@ -1051,7 +1051,8 @@ def _bar_center_mid(s, color, width, base): def bar(self, subset=None, axis=0, color='#d65f5f', width=100, align='left'): """ - Color the background ``color`` proptional to the values in each column. + Color the background ``color`` proportional to the values in each + column. Excludes non-numeric data by default. Parameters diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 236d70609e76c..c7c16598ee432 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -56,7 +56,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, List of BigQuery column names in the desired order for results DataFrame. reauth : boolean, default False - Force Google BigQuery to reauthenticate the user. This is useful + Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. private_key : str, optional Service account private key in JSON format. 
Can be file path diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index 01f7db7d68664..6f663f8ff8433 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -219,7 +219,7 @@ def build_table_schema(data, index=True, primary_key=None, version=True): ----- See `_as_json_table_type` for conversion types. Timedeltas as converted to ISO8601 duration format with - 9 decimal places after the secnods field for nanosecond precision. + 9 decimal places after the seconds field for nanosecond precision. Categoricals are converted to the `any` dtype, and use the `enum` field constraint to list the allowed values. The `ordered` attribute is included diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index daa370d0ca61a..aa39e341792c7 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -297,7 +297,7 @@ def read_hdf(path_or_buf, key=None, mode='r', **kwargs): objects. .. versionadded:: 0.19.0 support for pathlib, py.path. - .. versionadded:: 0.21.0 support for __fspath__ proptocol. + .. versionadded:: 0.21.0 support for __fspath__ protocol. key : object, optional The group identifier in the store. Can be omitted if the HDF file @@ -3790,13 +3790,13 @@ class WORMTable(Table): table_type = u('worm') def read(self, **kwargs): - """ read the indicies and the indexing array, calculate offset rows and + """ read the indices and the indexing array, calculate offset rows and return """ raise NotImplementedError("WORMTable needs to implement read") def write(self, **kwargs): """ write in a format that we can search later on (but cannot append - to): write out the indicies and the values using _write_array + to): write out the indices and the values using _write_array (e.g. 
a CArray) create an indexing table so that we can search """ raise NotImplementedError("WORKTable needs to implement write") @@ -4694,7 +4694,7 @@ class Selection(object): ---------- table : a Table object where : list of Terms (or convertible to) - start, stop: indicies to start and/or stop selection + start, stop: indices to start and/or stop selection """ diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 87b7d13251f28..0819df97ba5fa 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -1394,7 +1394,7 @@ def orientation(self): In statistics, `kernel density estimation`_ (KDE) is a non-parametric way to estimate the probability density function (PDF) of a random variable. This function uses Gaussian kernels and includes automatic - bandwith determination. + bandwidth determination. .. _kernel density estimation: https://en.wikipedia.org/wiki/Kernel_density_estimation @@ -2031,7 +2031,7 @@ def plot_series(data, kind='line', ax=None, # Series unique Tick label font size in points or as a string (e.g., `large`). rot : int or float, default 0 The rotation angle of labels (in degrees) - with respect to the screen coordinate sytem. + with respect to the screen coordinate system. grid : boolean, default True Setting this to True will show the grid. figsize : A tuple (width, height) in inches @@ -2063,7 +2063,7 @@ def plot_series(data, kind='line', ax=None, # Series unique * 'axes' : object of class matplotlib.axes.Axes * 'dict' : dict of matplotlib.lines.Line2D objects - * 'both' : a nametuple with strucure (ax, lines) + * 'both' : a namedtuple with structure (ax, lines) For data grouped with ``by``: @@ -2848,8 +2848,8 @@ def hist(self, bins=10, **kwds): >>> ax = s.plot.kde() A scalar bandwidth can be specified. Using a small bandwidth value can - lead to overfitting, while using a large bandwidth value may result - in underfitting: + lead to over-fitting, while using a large bandwidth value may result + in under-fitting: .. 
plot:: :context: close-figs @@ -3284,8 +3284,8 @@ def hist(self, by=None, bins=10, **kwds): >>> ax = df.plot.kde() A scalar bandwidth can be specified. Using a small bandwidth value can - lead to overfitting, while using a large bandwidth value may result - in underfitting: + lead to over-fitting, while using a large bandwidth value may result + in under-fitting: .. plot:: :context: close-figs @@ -3415,7 +3415,7 @@ def scatter(self, x, y, s=None, c=None, **kwds): - A sequence of color strings referred to by name, RGB or RGBA code, which will be used for each point's color recursively. For - intance ['green','yellow'] all points will be filled in green or + instance ['green','yellow'] all points will be filled in green or yellow, alternatively. - A column name or position whose values will be used to color the diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 6dd38187f7277..300e1acdea911 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1638,7 +1638,7 @@ def test_constructor_series_copy(self): def test_constructor_with_nas(self): # GH 5016 - # na's in indicies + # na's in indices def check(df): for i in range(len(df.columns)): diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index f0ba1851b28dd..a77c170221bea 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -130,7 +130,7 @@ def setup_method(self, method): setattr(self, o, d) def generate_indices(self, f, values=False): - """ generate the indicies + """ generate the indices if values is True , use the axis values is False, use the range """ diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index bfc74db73b813..49047e1da0996 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -391,13 +391,13 @@ def test_iloc_getitem_frame(self): expected = df.ix[[0, 2, 6], [0, 2]] 
tm.assert_frame_equal(result, expected) - # neg indicies + # neg indices result = df.iloc[[-1, 1, 3], [-1, 1]] with catch_warnings(record=True): expected = df.ix[[18, 2, 6], [6, 2]] tm.assert_frame_equal(result, expected) - # dups indicies + # dups indices result = df.iloc[[-1, -1, 1, 3], [-1, 1]] with catch_warnings(record=True): expected = df.ix[[18, 18, 2, 6], [6, 2]] diff --git a/pandas/tests/indexing/test_panel.py b/pandas/tests/indexing/test_panel.py index 81265c9f2941d..1085e2a61be48 100644 --- a/pandas/tests/indexing/test_panel.py +++ b/pandas/tests/indexing/test_panel.py @@ -43,12 +43,12 @@ def test_iloc_getitem_panel(self): expected = p.loc[['A', 'C']] tm.assert_panel_equal(result, expected) - # neg indicies + # neg indices result = p.iloc[[-1, 1], [-1, 1]] expected = p.loc[['D', 'B'], ['c', 'b']] tm.assert_panel_equal(result, expected) - # dups indicies + # dups indices result = p.iloc[[-1, -1, 1], [-1, 1]] expected = p.loc[['D', 'D', 'B'], ['c', 'b']] tm.assert_panel_equal(result, expected) diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 381a059244858..d590cfd6b6c64 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -1484,7 +1484,7 @@ def test_append_with_data_columns(self): store.append('df', df[2:]) tm.assert_frame_equal(store['df'], df) - # check that we have indicies created + # check that we have indices created assert(store._handle.root.df.table.cols.index.is_indexed is True) assert(store._handle.root.df.table.cols.B.is_indexed is True) @@ -4511,7 +4511,7 @@ def do_copy(f, new_f=None, keys=None, keys = store.keys() assert set(keys) == set(tstore.keys()) - # check indicies & nrows + # check indices & nrows for k in tstore.keys(): if tstore.get_storer(k).is_table: new_t = tstore.get_storer(k) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 12d803a76e7f3..d95a2ad2d7f76 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ 
-1524,7 +1524,7 @@ def test_take(self): expected = self.panel.reindex(minor=['D', 'A', 'B', 'C']) assert_panel_equal(result, expected) - # neg indicies ok + # neg indices ok expected = self.panel.reindex(minor=['D', 'D', 'B', 'C']) result = self.panel.take([3, -1, 1, 2], axis=2) assert_panel_equal(result, expected)
This is my first time contributing to a project this big so I hope everything is okay I am still working on fixing bugs and polishing a few things but here is what I have done so far: - fixed typos - updated a wordlist txt file with words to be ignored by the spellchecker - added the spellcheck method to the DocBuilder class (should this be named spelling?) I've re-used the _sphix_build method to run the spelling command, to do this I had to update the _sphix_build method to include 'spelling' as a kind. I'd like to know if this is the best approach of If I should just replicate the code. I have added a few configuring options in order to get better results when using the spellcheck, the first one was to use the wordlist text file, the second was to ignore known PyPI packages names and finally to show suggestions of a misspelt word. At the moment the spellcheck will run if we type the command: `python make.py spellcheck` and the spellcheck method is called, I haven't yet figured out how to fail the build when there are exceptions like the Issue 21079 suggested. ---- - [x] closes #21079 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21109
2018-05-17T20:53:32Z
2018-06-07T13:20:33Z
2018-06-07T13:20:33Z
2018-06-12T07:58:06Z
PERF: Improve performance of CategoricalIndex.is_unique
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 35d150dc263b8..d45b4e19c6aac 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -30,6 +30,7 @@ Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Improved performance of :meth:`CategoricalIndex.is_monotonic_increasing`, :meth:`CategoricalIndex.is_monotonic_decreasing` and :meth:`CategoricalIndex.is_monotonic` (:issue:`21025`) +- Improved performance of :meth:`CategoricalIndex.is_unique` (:issue:`21107`) - - diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 78b7ae7054248..150eca32e229d 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -378,7 +378,7 @@ def _engine(self): # introspection @cache_readonly def is_unique(self): - return not self.duplicated().any() + return self._engine.is_unique @property def is_monotonic_increasing(self): diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 0e630f69b1a32..a2a4170256088 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -581,6 +581,15 @@ def test_is_monotonic(self, data, non_lexsorted_data): assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing + @pytest.mark.parametrize('values, expected', [ + ([1, 2, 3], True), + ([1, 3, 1], False), + (list('abc'), True), + (list('aba'), False)]) + def test_is_unique(self, values, expected): + ci = CategoricalIndex(values) + assert ci.is_unique is expected + def test_duplicates(self): idx = CategoricalIndex([0, 0, 0], name='foo')
``CategoricalIndex.is_unique`` creates an extraneous boolean array. By changing ``CategoricalIndex.is_unique`` to use ``CategoricalIndex._engine.is_unique`` instead, this array creation is avoided. We simultaneously get to set ``is_monotonic*`` for free, and therefore will save time, if that property is called later. Demonstration ========== Setup: ```python >>> n = 1_000_000 >>> ci = pd.CategoricalIndex(list('a' * n + 'b' * n + 'c' * n)) ``` Currently, ``ci.is_unique`` is about the same (disregarding``@readonly_cache``) as: ```python >>> from pandas._libs.hashtable import duplicated_int64 >>> not duplicated_int64(ci.codes.astype('int64')).any() False >>> %timeit duplicated_int64(ci.codes.astype('int64')).any() 46.7 ms ± 4.18 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) ``` Notice that the ``duplicated_int64()`` creates an boolean array, which is not needed and slows the operation down. If we instead use ``ci._engine.is_unique`` to check for uniqueness, the check is roughly similar to: ```python >>> from pandas._libs.algos import is_monotonic_int64 >>> is_monotonic_int64(ci.codes.astype('int64'), False) (True, False, False) # (is_monotonic_inc, is_monotonic_dec, is_unique) >>> %timeit is_monotonic_int64(ci.codes.astype('int64'), False) 23.3 ms ± 364 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) ``` This is faster than the other version, as the intermediate boolean array is not created in this version. Also, is it (IMO) more idiomatic, as ``index._engine`` is in general supposed to be used for this kind of index content checks.
https://api.github.com/repos/pandas-dev/pandas/pulls/21107
2018-05-17T19:14:55Z
2018-06-04T21:43:17Z
2018-06-04T21:43:17Z
2018-10-27T08:16:21Z
Replaced open with Context Mgrs in Parser Tests
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index 2423ddcd9a1a0..2b7ff1f5a9879 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -54,20 +54,21 @@ def test_bad_stream_exception(self): # and C engine will raise UnicodeDecodeError instead of # c engine raising ParserError and swallowing exception # that caused read to fail. - handle = open(self.csv_shiftjs, "rb") codec = codecs.lookup("utf-8") utf8 = codecs.lookup('utf-8') - # stream must be binary UTF8 - stream = codecs.StreamRecoder( - handle, utf8.encode, utf8.decode, codec.streamreader, - codec.streamwriter) + if compat.PY3: msg = "'utf-8' codec can't decode byte" else: msg = "'utf8' codec can't decode byte" - with tm.assert_raises_regex(UnicodeDecodeError, msg): - self.read_csv(stream) - stream.close() + + # stream must be binary UTF8 + with open(self.csv_shiftjs, "rb") as handle, codecs.StreamRecoder( + handle, utf8.encode, utf8.decode, codec.streamreader, + codec.streamwriter) as stream: + + with tm.assert_raises_regex(UnicodeDecodeError, msg): + self.read_csv(stream) def test_read_csv(self): if not compat.PY3: diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py index 01c6620e50d37..e84db66561c49 100644 --- a/pandas/tests/io/parser/compression.py +++ b/pandas/tests/io/parser/compression.py @@ -110,16 +110,15 @@ def test_read_csv_infer_compression(self): # see gh-9770 expected = self.read_csv(self.csv1, index_col=0, parse_dates=True) - inputs = [self.csv1, self.csv1 + '.gz', - self.csv1 + '.bz2', open(self.csv1)] + with open(self.csv1) as f: + inputs = [self.csv1, self.csv1 + '.gz', + self.csv1 + '.bz2', f] - for f in inputs: - df = self.read_csv(f, index_col=0, parse_dates=True, - compression='infer') - - tm.assert_frame_equal(expected, df) + for inp in inputs: + df = self.read_csv(inp, index_col=0, parse_dates=True, + compression='infer') - inputs[3].close() + tm.assert_frame_equal(expected, df) 
def test_read_csv_compressed_utf16_example(self): # GH18071 diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index ab4c14034cd20..e8d9d8b52164b 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -35,24 +35,18 @@ def setup_method(self, method): self.xls1 = os.path.join(self.dirpath, 'test.xls') def test_file_handle(self): - try: - f = open(self.csv1, 'rb') + with open(self.csv1, 'rb') as f: reader = TextReader(f) - result = reader.read() # noqa - finally: - f.close() + reader.read() def test_string_filename(self): reader = TextReader(self.csv1, header=None) reader.read() def test_file_handle_mmap(self): - try: - f = open(self.csv1, 'rb') + with open(self.csv1, 'rb') as f: reader = TextReader(f, memory_map=True, header=None) reader.read() - finally: - f.close() def test_StringIO(self): with open(self.csv1, 'rb') as f:
Maybe closes #21102 and #19984, though if not should still work as a general cleanup
https://api.github.com/repos/pandas-dev/pandas/pulls/21105
2018-05-17T16:08:22Z
2018-05-19T20:08:19Z
2018-05-19T20:08:18Z
2018-06-08T17:09:48Z
BUG: type aliasing is not allowed to be compared using isinstance()
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 35d150dc263b8..9c19d4d6bbaad 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -48,6 +48,11 @@ Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`) + +Strings +^^^^^^^ + +- Bug in :meth:`Series.str.replace()` where the method throws `TypeError` on Python 3.5.2 (:issue: `21078`) - Conversion diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 12517372fedd1..5ae22694d0da7 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -425,7 +425,7 @@ def raise_with_traceback(exc, traceback=Ellipsis): # In Python 3.7, the private re._pattern_type is removed. # Python 3.5+ have typing.re.Pattern -if PY35: +if PY36: import typing re_type = typing.re.Pattern else: diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py index ead9ba1e26e2d..79d3aad493182 100644 --- a/pandas/tests/test_compat.py +++ b/pandas/tests/test_compat.py @@ -4,9 +4,10 @@ """ import pytest +import re from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap, lfilter, builtins, iterkeys, itervalues, iteritems, - next, get_range_parameters, PY2) + next, get_range_parameters, PY2, re_type) class TestBuiltinIterators(object): @@ -89,3 +90,7 @@ def test_get_range_parameters(self, start, stop, step): assert start_result == start_expected assert stop_result == stop_expected assert step_result == step_expected + + +def test_re_type(): + assert isinstance(re.compile(''), re_type)
- [X] closes #21078 - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry As raised in #21078, Python 3.5.4 supports the using `isinstance()` with `typing.re.Pattern` But it does not support the same method in 3.5.2. ``` Python Python 3.5.2 (default, Nov 23 2017, 16:37:01) [GCC 5.4.0 20160609] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import re >>> import typing >>> isinstance(re.compile(''), typing.re.Pattern) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/lib/python3.5/typing.py", line 260, in __instancecheck__ raise TypeError("Type aliases cannot be used with isinstance().") TypeError: Type aliases cannot be used with isinstance(). ``` This Bugfix PR is to revert the `re_type` to be back to what it used to be before the update with the new release.
https://api.github.com/repos/pandas-dev/pandas/pulls/21098
2018-05-17T04:02:09Z
2018-05-17T20:55:14Z
2018-05-17T20:55:14Z
2018-05-19T23:47:10Z
Support for OO Optimization
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 35484e34ee9eb..654b27624a6fe 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -46,8 +46,6 @@ Documentation Changes Bug Fixes ~~~~~~~~~ -- tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`) - Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -101,3 +99,9 @@ Reshaping - Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`) - + +Other +^^^^^ + +- Tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`) +- Bug preventing pandas from being importable with -OO optimization (:issue:`21071`) diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index c28e2052bd93e..c2d09c6d49e86 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -2,6 +2,8 @@ """ Testing that we work in the downstream packages """ +import subprocess + import pytest import numpy as np # noqa from pandas import DataFrame @@ -53,6 +55,11 @@ def test_xarray(df): assert df.to_xarray() is not None +def test_oo_optimizable(): + # GH 21071 + subprocess.check_call(["python", "-OO", "-c", "import pandas"]) + + @tm.network def test_statsmodels(): diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 749165f894819..c294110d89ec5 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1090,12 +1090,17 @@ def apply(self, other): class CustomBusinessMonthEnd(_CustomBusinessMonth): - __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end') + # TODO(py27): Replace condition with Subsitution after dropping Py27 + if _CustomBusinessMonth.__doc__: + __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end') _prefix = 'CBM' class CustomBusinessMonthBegin(_CustomBusinessMonth): - __doc__ = 
_CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'beginning') + # TODO(py27): Replace condition with Subsitution after dropping Py27 + if _CustomBusinessMonth.__doc__: + __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', + 'beginning') _prefix = 'CBMS' diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 624fbbbd4f05e..6b55554cdc941 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -4,7 +4,7 @@ import types import warnings from textwrap import dedent, wrap -from functools import wraps, update_wrapper +from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS def deprecate(name, alternative, version, alt_name=None, @@ -20,18 +20,18 @@ def deprecate(name, alternative, version, alt_name=None, Parameters ---------- name : str - Name of function to deprecate - alternative : str - Name of function to use instead + Name of function to deprecate. + alternative : func + Function to use instead. version : str - Version of pandas in which the method has been deprecated + Version of pandas in which the method has been deprecated. alt_name : str, optional - Name to use in preference of alternative.__name__ + Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str - The message to display in the warning. - Default is '{name} is deprecated. Use {alt_name} instead.' + The message to display in the warning. + Default is '{name} is deprecated. Use {alt_name} instead.' 
""" alt_name = alt_name or alternative.__name__ @@ -39,25 +39,26 @@ def deprecate(name, alternative, version, alt_name=None, warning_msg = msg or '{} is deprecated, use {} instead'.format(name, alt_name) - @wraps(alternative) + # adding deprecated directive to the docstring + msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name) + msg = '\n '.join(wrap(msg, 70)) + + @Substitution(version=version, msg=msg) + @Appender(alternative.__doc__) def wrapper(*args, **kwargs): + """ + .. deprecated:: %(version)s + + %(msg)s + + """ warnings.warn(warning_msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) - # adding deprecated directive to the docstring - msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name) - tpl = dedent(""" - .. deprecated:: {version} - - {msg} - - {rest} - """) - rest = getattr(wrapper, '__doc__', '') - docstring = tpl.format(version=version, - msg='\n '.join(wrap(msg, 70)), - rest=dedent(rest)) - wrapper.__doc__ = docstring + # Since we are using Substitution to create the required docstring, + # remove that from the attributes that should be assigned to the wrapper + assignments = tuple(x for x in WRAPPER_ASSIGNMENTS if x != '__doc__') + update_wrapper(wrapper, alternative, assigned=assignments) return wrapper
- [X] closes #21071 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21093
2018-05-16T21:26:01Z
2018-05-30T21:04:48Z
2018-05-30T21:04:48Z
2018-06-08T17:18:57Z
BUG: assert_index_equal does not raise error for check_categorical=False when comparing 2 CategoricalIndex objects
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 5c9c3e2931bd9..d211a21546978 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -46,6 +46,11 @@ Bug Fixes - - +Categorical +^^^^^^^^^^^ + +- Bug in :func:`pandas.util.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`) + Conversion ^^^^^^^^^^ diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py index d6f58d16bcf64..ab7c4fb528452 100644 --- a/pandas/tests/util/test_testing.py +++ b/pandas/tests/util/test_testing.py @@ -503,6 +503,25 @@ def test_index_equal_metadata_message(self): with tm.assert_raises_regex(AssertionError, expected): assert_index_equal(idx1, idx2) + def test_categorical_index_equality(self): + expected = """Index are different + +Attribute "dtype" are different +\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], ordered=False\\) +\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b', u?'c'\\], \ +ordered=False\\)""" + + with tm.assert_raises_regex(AssertionError, expected): + assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])), + pd.Index(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c']))) + + def test_categorical_index_equality_relax_categories_check(self): + assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])), + pd.Index(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c'])), + check_categorical=False) + class TestAssertSeriesEqual(object): @@ -600,6 +619,25 @@ def test_series_equal_message(self): assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]), check_less_precise=True) + def test_categorical_series_equality(self): + expected = """Attributes are different + +Attribute "dtype" are different +\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], ordered=False\\) +\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', 
u?'b', u?'c'\\], \ +ordered=False\\)""" + + with tm.assert_raises_regex(AssertionError, expected): + assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])), + pd.Series(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c']))) + + def test_categorical_series_equality_relax_categories_check(self): + assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])), + pd.Series(pd.Categorical(['a', 'b'], + categories=['a', 'b', 'c'])), + check_categorical=False) + class TestAssertFrameEqual(object): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index e1484a9c1b390..233eba6490937 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -778,8 +778,12 @@ def assert_index_equal(left, right, exact='equiv', check_names=True, def _check_types(l, r, obj='Index'): if exact: - assert_class_equal(left, right, exact=exact, obj=obj) - assert_attr_equal('dtype', l, r, obj=obj) + assert_class_equal(l, r, exact=exact, obj=obj) + + # Skip exact dtype checking when `check_categorical` is False + if check_categorical: + assert_attr_equal('dtype', l, r, obj=obj) + # allow string-like to have different inferred_types if l.inferred_type in ('string', 'unicode'): assert r.inferred_type in ('string', 'unicode') @@ -829,7 +833,8 @@ def _get_ilevel_values(index, level): # get_level_values may change dtype _check_types(left.levels[level], right.levels[level], obj=obj) - if check_exact: + # skip exact index checking when `check_categorical` is False + if check_exact and check_categorical: if not left.equals(right): diff = np.sum((left.values != right.values) .astype(int)) * 100.0 / len(left) @@ -950,23 +955,23 @@ def is_sorted(seq): def assert_categorical_equal(left, right, check_dtype=True, - obj='Categorical', check_category_order=True): + check_category_order=True, obj='Categorical'): """Test that Categoricals are equivalent. 
Parameters ---------- - left, right : Categorical - Categoricals to compare + left : Categorical + right : Categorical check_dtype : bool, default True Check that integer dtype of the codes are the same - obj : str, default 'Categorical' - Specify object name being compared, internally used to show appropriate - assertion message check_category_order : bool, default True Whether the order of the categories should be compared, which implies identical integer codes. If False, only the resulting values are compared. The ordered attribute is checked regardless. + obj : str, default 'Categorical' + Specify object name being compared, internally used to show appropriate + assertion message """ _check_isinstance(left, right, Categorical) @@ -1020,7 +1025,7 @@ def raise_assert_detail(obj, message, left, right, diff=None): def assert_numpy_array_equal(left, right, strict_nan=False, check_dtype=True, err_msg=None, - obj='numpy array', check_same=None): + check_same=None, obj='numpy array'): """ Checks that 'np.ndarray' is equivalent Parameters @@ -1033,11 +1038,11 @@ def assert_numpy_array_equal(left, right, strict_nan=False, check dtype if both a and b are np.ndarray err_msg : str, default None If provided, used as assertion message + check_same : None|'copy'|'same', default None + Ensure left and right refer/do not refer to the same memory area obj : str, default 'numpy array' Specify object name being compared, internally used to show appropriate assertion message - check_same : None|'copy'|'same', default None - Ensure left and right refer/do not refer to the same memory area """ # instance validation
- [x] closes #19776 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry * tests added for `check_categorical` parameter in `assert_series_equal` * cleaned up `assert_categorical_equal` parameter order and docstring to match other functions (`obj` at the end) Thanks to all maintainers who helped answer questions on Gitter during the informal PyCon sprint!
https://api.github.com/repos/pandas-dev/pandas/pulls/21092
2018-05-16T18:55:14Z
2018-05-19T20:10:01Z
2018-05-19T20:10:00Z
2018-06-08T17:10:27Z
BUG: to_records() fails for MultiIndex DF (#21064)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 3886b6c142305..5f9e43a158b0f 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -131,7 +131,7 @@ Indexing MultiIndex ^^^^^^^^^^ -- +- Bug :func:`to_records` fails for empty MultiIndex DF (:issue:`21064`) - - diff --git a/pandas/core/frame.py b/pandas/core/frame.py index dccc840f5affd..187b885feec88 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1392,7 +1392,13 @@ def to_records(self, index=True, convert_datetime64=None): else: if isinstance(self.index, MultiIndex): # array of tuples to numpy cols. copy copy copy - ix_vals = lmap(np.array, zip(*self.index.values)) + tuples = self.index.values + if len(tuples): + ix_vals = lmap(np.array, zip(*tuples)) + else: + # empty MultiIndex DF + ix_vals = [np.array([], dtype=lev.dtype) + for lev in self.index.levels] else: ix_vals = [self.index.values] diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py index 2472022b862bc..e701600b1cda7 100644 --- a/pandas/tests/frame/test_convert_to.py +++ b/pandas/tests/frame/test_convert_to.py @@ -328,3 +328,35 @@ def test_to_dict_index_dtypes(self, into, expected): result = DataFrame.from_dict(result, orient='index')[cols] expected = DataFrame.from_dict(expected, orient='index')[cols] tm.assert_frame_equal(result, expected) + + def test_to_records_with_multiindex(self): + size = 4 + idx1 = [u'a', u'a', u'b', u'b'] + idx2 = [u'x', u'y', u'x', u'y'] + tup = zip(idx1, idx2) + index = MultiIndex.from_tuples(tup) + random_data = np.random.randn(size, size) + df = DataFrame(random_data, index=index) + + result = df.to_records(index=True) + + col_arrays = [idx1, idx2] + [col for col in random_data.T] + expected = np.rec.fromarrays( + col_arrays, dtype=np.dtype([('level_0', '<U1'), ('level_1', '<U1'), + ('0', '<f8'), ('1', '<f8'), + ('2', '<f8'), ('3', '<f8')])) + + tm.assert_numpy_array_equal(result, expected) + + def 
test_to_records_with_empty_multiindex(self): + # GH 21064 + multi = MultiIndex([['a'], ['b']], labels=[[], []]) + df = DataFrame(columns=['A'], index=multi) + + expected = np.rec.fromarrays([[], [], []], + dtype=np.dtype([('level_0', 'O'), + ('level_1', 'O'), + ('A', 'O')])) + + result = df.to_records(index=True) + tm.assert_numpy_array_equal(result, expected)
- [x] closes #21064 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This fixes the bug that prevents using `to_records` on an empty dataframe that has a MultiIndex `self.index.values` returns an empty array (no tuples) for an empty MultiIndex DF and that would be used for `ix_values`. Instead, `ix_values should have an array of empty arrays, each one with the correct dtype according to the MultiIndex level/column.
https://api.github.com/repos/pandas-dev/pandas/pulls/21082
2018-05-16T12:10:05Z
2018-12-03T01:43:49Z
null
2018-12-03T01:43:49Z
Fixed extensionarray ref [ci skip]
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index feba9d856789b..a099fb40c35a7 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -202,7 +202,7 @@ for storing ip addresses. ...: ``IPArray`` isn't a normal 1-D NumPy array, but because it's a pandas -:ref:`~pandas.api.extension.ExtensionArray`, it can be stored properly inside pandas' containers. +:class:`~pandas.api.extension.ExtensionArray`, it can be stored properly inside pandas' containers. .. code-block:: ipython
[ci skip]
https://api.github.com/repos/pandas-dev/pandas/pulls/21077
2018-05-16T11:09:26Z
2018-05-16T11:09:33Z
2018-05-16T11:09:33Z
2018-05-16T11:09:36Z
DOC: Start 0.24.0 whatsnew [ci skip]
diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index d61a98fe2dae4..c744e44b4c17c 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -18,6 +18,10 @@ What's New These are new features and improvements of note in each release. +.. include:: whatsnew/v0.24.0.txt + +.. include:: whatsnew/v0.23.1.txt + .. include:: whatsnew/v0.23.0.txt .. include:: whatsnew/v0.22.0.txt diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt new file mode 100644 index 0000000000000..3886b6c142305 --- /dev/null +++ b/doc/source/whatsnew/v0.24.0.txt @@ -0,0 +1,179 @@ +.. _whatsnew_0240: + +v0.24.0 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _whatsnew_0240.enhancements: + +New features +~~~~~~~~~~~~ + +.. _whatsnew_0240.enhancements.other: + +Other Enhancements +^^^^^^^^^^^^^^^^^^ +- +- +- + +.. _whatsnew_0240.api_breaking: + + +Backwards incompatible API changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _whatsnew_0240.api.other: + +Other API Changes +^^^^^^^^^^^^^^^^^ + +- +- +- + +.. _whatsnew_0240.deprecations: + +Deprecations +~~~~~~~~~~~~ + +- +- +- + +.. _whatsnew_0240.prior_deprecations: + +Removal of prior version deprecations/changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- +- +- + +.. _whatsnew_0240.performance: + +Performance Improvements +~~~~~~~~~~~~~~~~~~~~~~~~ + +- +- +- + +.. _whatsnew_0240.docs: + +Documentation Changes +~~~~~~~~~~~~~~~~~~~~~ + +- +- +- + +.. 
_whatsnew_0240.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +Categorical +^^^^^^^^^^^ + +- +- +- + +Datetimelike +^^^^^^^^^^^^ + +- +- +- + +Timedelta +^^^^^^^^^ + +- +- +- + +Timezones +^^^^^^^^^ + +- +- +- + +Offsets +^^^^^^^ + +- +- +- + +Numeric +^^^^^^^ + +- +- +- + +Strings +^^^^^^^ + +- +- +- + +Indexing +^^^^^^^^ + +- +- +- + +MultiIndex +^^^^^^^^^^ + +- +- +- + +I/O +^^^ + +- +- +- + +Plotting +^^^^^^^^ + +- +- +- + +Groupby/Resample/Rolling +^^^^^^^^^^^^^^^^^^^^^^^^ + +- +- +- + +Sparse +^^^^^^ + +- +- +- + +Reshaping +^^^^^^^^^ + +- +- +- + +Other +^^^^^ + +- +- +- +
[ci skip]
https://api.github.com/repos/pandas-dev/pandas/pulls/21072
2018-05-16T03:07:04Z
2018-05-16T03:07:34Z
2018-05-16T03:07:34Z
2018-05-26T10:45:00Z
DOC: Correct the date of whatsnew v0.23 #21067
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3f89de1dc22d8..feba9d856789b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1,6 +1,6 @@ .. _whatsnew_0230: -v0.23.0 (May 15, 2017) +v0.23.0 (May 15, 2018) ---------------------- This is a major release from 0.22.0 and includes a number of API changes,
- [x] closes #21067 - [x] tests added / passed (NA, just the DOC) - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21069
2018-05-16T02:24:03Z
2018-05-16T03:02:00Z
2018-05-16T03:02:00Z
2018-05-16T13:34:50Z
Prevent Unlimited Agg Recursion with Duplicate Col Names
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 5c9c3e2931bd9..338364a943edf 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -43,7 +43,10 @@ Documentation Changes Bug Fixes ~~~~~~~~~ -- +Groupby/Resample/Rolling +^^^^^^^^^^^^^^^^^^^^^^^^ + +- Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`) - Conversion diff --git a/pandas/core/base.py b/pandas/core/base.py index fa78c89ed4ee7..aa051c6f5eaef 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -590,9 +590,10 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): # multiples else: - for col in obj: + for index, col in enumerate(obj): try: - colg = self._gotitem(col, ndim=1, subset=obj[col]) + colg = self._gotitem(col, ndim=1, + subset=obj.iloc[:, index]) results.append(colg.aggregate(arg)) keys.append(col) except (TypeError, DataError): @@ -675,7 +676,6 @@ def _gotitem(self, key, ndim, subset=None): subset : object, default None subset to act on """ - # create a new object to prevent aliasing if subset is None: subset = self.obj diff --git a/pandas/core/frame.py b/pandas/core/frame.py index dccc840f5affd..77a67c048a48d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5731,7 +5731,12 @@ def diff(self, periods=1, axis=0): # ---------------------------------------------------------------------- # Function application - def _gotitem(self, key, ndim, subset=None): + def _gotitem(self, + key, # type: Union[str, List[str]] + ndim, # type: int + subset=None # type: Union[Series, DataFrame, None] + ): + # type: (...) -> Union[Series, DataFrame] """ sub-classes to define return a sliced object @@ -5746,9 +5751,11 @@ def _gotitem(self, key, ndim, subset=None): """ if subset is None: subset = self + elif subset.ndim == 1: # is Series + return subset # TODO: _shallow_copy(subset)? 
- return self[key] + return subset[key] _agg_doc = dedent(""" The aggregation operations are always performed over an axis, either the diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index ac46f02d00773..dfb2961befe35 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -554,6 +554,14 @@ def test_apply_non_numpy_dtype(self): result = df.apply(lambda x: x) assert_frame_equal(result, df) + def test_apply_dup_names_multi_agg(self): + # GH 21063 + df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a']) + expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min']) + result = df.agg(['min']) + + tm.assert_frame_equal(result, expected) + class TestInferOutputShape(object): # the user has supplied an opaque UDF where
- [X] closes #21063 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry The `_gotitem` implementation for `DataFrame` seems a little strange so there may be a more comprehensive approach, but this should prevent the issue for the time being Didn't add whatsnew yet since none existed for 0.23.1. Happy to add if we are OK with this fix
https://api.github.com/repos/pandas-dev/pandas/pulls/21066
2018-05-15T18:29:40Z
2018-05-17T12:42:15Z
2018-05-17T12:42:14Z
2018-06-08T17:08:34Z
DOC: updated docstring for nanoseconds function per doc guidelines
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index d17d4e7139d72..f7bb6c1dbb304 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -791,9 +791,32 @@ cdef class _Timedelta(timedelta): @property def nanoseconds(self): """ - Number of nanoseconds (>= 0 and less than 1 microsecond). + Return the number of nanoseconds (n), where 0 <= n < 1 microsecond. + + Returns + ------- + int + Number of nanoseconds. + + See Also + -------- + Timedelta.components : Return all attributes with assigned values + (i.e. days, hours, minutes, seconds, milliseconds, microseconds, + nanoseconds). + + Examples + -------- + **Using string input** + + >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns') + >>> td.nanoseconds + 42 + + **Using integer input** - .components will return the shown components + >>> td = pd.Timedelta(42, unit='ns') + >>> td.nanoseconds + 42 """ self._ensure_components() return self._ns
- [ ] closes #xxxx - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry ``` ################################################################################ ################### Docstring (pandas.Timedelta.nanoseconds) ################### ################################################################################ Return the number of nanoseconds (n), where 0 <= n < 1 microsecond. Returns ------- int : Number of nanoseconds See Also -------- Timedelta.components : Return all attributes with assigned values (i.e. days, seconds, microseconds, nanoseconds) ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No extended summary found No examples section found ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21065
2018-05-15T17:20:02Z
2018-05-17T00:20:43Z
2018-05-17T00:20:43Z
2018-06-08T17:06:45Z
Pass sort for agg multiple
diff --git a/pandas/core/base.py b/pandas/core/base.py index 5022beabef76b..fa78c89ed4ee7 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -608,7 +608,7 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): raise ValueError("no results") try: - return concat(results, keys=keys, axis=1) + return concat(results, keys=keys, axis=1, sort=False) except TypeError: # we are concatting non-NDFrame objects, diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index af39c8f01cf73..ac46f02d00773 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -908,6 +908,31 @@ def test_demo(self): index=['max', 'min', 'sum']) tm.assert_frame_equal(result.reindex_like(expected), expected) + def test_agg_multiple_mixed_no_warning(self): + # https://github.com/pandas-dev/pandas/issues/20909 + mdf = pd.DataFrame({'A': [1, 2, 3], + 'B': [1., 2., 3.], + 'C': ['foo', 'bar', 'baz'], + 'D': pd.date_range('20130101', periods=3)}) + expected = pd.DataFrame({"A": [1, 6], 'B': [1.0, 6.0], + "C": ['bar', 'foobarbaz'], + "D": [pd.Timestamp('2013-01-01'), pd.NaT]}, + index=['min', 'sum']) + # sorted index + with tm.assert_produces_warning(None): + result = mdf.agg(['min', 'sum']) + + tm.assert_frame_equal(result, expected) + + with tm.assert_produces_warning(None): + result = mdf[['D', 'C', 'B', 'A']].agg(['sum', 'min']) + + # For backwards compatibility, the result's index is + # still sorted by function name, so it's ['min', 'sum'] + # not ['sum', 'min']. + expected = expected[['D', 'C', 'B', 'A']] + tm.assert_frame_equal(result, expected) + def test_agg_dict_nested_renaming_depr(self): df = pd.DataFrame({'A': range(5), 'B': 5})
xref https://github.com/pandas-dev/pandas/issues/20909
https://api.github.com/repos/pandas-dev/pandas/pulls/21062
2018-05-15T16:07:43Z
2018-05-15T20:02:19Z
2018-05-15T20:02:19Z
2018-05-15T20:02:33Z
DOC: add highlights and toc to whatsnew file for 0.23.0
diff --git a/doc/source/release.rst b/doc/source/release.rst index 5fe397a7cbb37..32db2ff5ebb24 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -42,7 +42,7 @@ pandas 0.23.0 **Release date**: May 15, 2017 -This is a major release from 0.23.0 and includes a number of API changes, new +This is a major release from 0.22.0 and includes a number of API changes, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all users upgrade to this version. @@ -54,6 +54,7 @@ Highlights include: - :ref:`Merging / sorting on a combination of columns and index levels <whatsnew_0230.enhancements.merge_on_columns_and_levels>`. - :ref:`Extending Pandas with custom types <whatsnew_023.enhancements.extension>`. - :ref:`Excluding unobserved categories from groupby <whatsnew_0230.enhancements.categorical_grouping>`. +- :ref:`Changes to make output shape of DataFrame.apply consistent <whatsnew_0230.api_breaking.apply>`. See the :ref:`full whatsnew <whatsnew_0230>` for a list of all the changes. diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 89dab728d2bd4..3f89de1dc22d8 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -8,90 +8,114 @@ deprecations, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all users upgrade to this version. +Highlights include: + +- :ref:`Round-trippable JSON format with 'table' orient <whatsnew_0230.enhancements.round-trippable_json>`. +- :ref:`Instantiation from dicts respects order for Python 3.6+ <whatsnew_0230.api_breaking.dict_insertion_order>`. +- :ref:`Dependent column arguments for assign <whatsnew_0230.enhancements.assign_dependent>`. +- :ref:`Merging / sorting on a combination of columns and index levels <whatsnew_0230.enhancements.merge_on_columns_and_levels>`. 
+- :ref:`Extending Pandas with custom types <whatsnew_023.enhancements.extension>`. +- :ref:`Excluding unobserved categories from groupby <whatsnew_0230.enhancements.categorical_grouping>`. +- :ref:`Changes to make output shape of DataFrame.apply consistent <whatsnew_0230.api_breaking.apply>`. + +Check the :ref:`API Changes <whatsnew_0230.api_breaking>` and :ref:`deprecations <whatsnew_0230.deprecations>` before updating. + .. warning:: Starting January 1, 2019, pandas feature releases will support Python 3 only. See :ref:`install.dropping-27` for more. +.. contents:: What's new in v0.23.0 + :local: + :backlinks: none + :depth: 2 + .. _whatsnew_0230.enhancements: New features ~~~~~~~~~~~~ -.. _whatsnew_0210.enhancements.limit_area: - -``DataFrame.interpolate`` has gained the ``limit_area`` kwarg -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. _whatsnew_0230.enhancements.round-trippable_json: -:meth:`DataFrame.interpolate` has gained a ``limit_area`` parameter to allow further control of which ``NaN`` s are replaced. -Use ``limit_area='inside'`` to fill only NaNs surrounded by valid values or use ``limit_area='outside'`` to fill only ``NaN`` s -outside the existing valid values while preserving those inside. (:issue:`16284`) See the :ref:`full documentation here <missing_data.interp_limits>`. +JSON read/write round-trippable with ``orient='table'`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +A ``DataFrame`` can now be written to and subsequently read back via JSON while preserving metadata through usage of the ``orient='table'`` argument (see :issue:`18912` and :issue:`9146`). Previously, none of the available ``orient`` values guaranteed the preservation of dtypes and index names, amongst other metadata. .. 
ipython:: python - ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13, np.nan, np.nan]) - ser + df = pd.DataFrame({'foo': [1, 2, 3, 4], + 'bar': ['a', 'b', 'c', 'd'], + 'baz': pd.date_range('2018-01-01', freq='d', periods=4), + 'qux': pd.Categorical(['a', 'b', 'c', 'c']) + }, index=pd.Index(range(4), name='idx')) + df + df.dtypes + df.to_json('test.json', orient='table') + new_df = pd.read_json('test.json', orient='table') + new_df + new_df.dtypes -Fill one consecutive inside value in both directions +Please note that the string `index` is not supported with the round trip format, as it is used by default in ``write_json`` to indicate a missing index name. .. ipython:: python + :okwarning: - ser.interpolate(limit_direction='both', limit_area='inside', limit=1) + df.index.name = 'index' -Fill all consecutive outside values backward + df.to_json('test.json', orient='table') + new_df = pd.read_json('test.json', orient='table') + new_df + new_df.dtypes .. ipython:: python + :suppress: - ser.interpolate(limit_direction='backward', limit_area='outside') + import os + os.remove('test.json') -Fill all consecutive outside values in both directions -.. ipython:: python - - ser.interpolate(limit_direction='both', limit_area='outside') +.. _whatsnew_0230.enhancements.assign_dependent: -.. _whatsnew_0210.enhancements.get_dummies_dtype: -``get_dummies`` now supports ``dtype`` argument -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +``.assign()`` accepts dependent arguments +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The :func:`get_dummies` now accepts a ``dtype`` argument, which specifies a dtype for the new columns. The default remains uint8. (:issue:`18330`) +The :func:`DataFrame.assign` now accepts dependent keyword arguments for python version later than 3.6 (see also `PEP 468 +<https://www.python.org/dev/peps/pep-0468/>`_). Later keyword arguments may now refer to earlier ones if the argument is a callable. 
See the +:ref:`documentation here <dsintro.chained_assignment>` (:issue:`14207`) .. ipython:: python - df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]}) - pd.get_dummies(df, columns=['c']).dtypes - pd.get_dummies(df, columns=['c'], dtype=bool).dtypes - - -.. _whatsnew_0230.enhancements.window_raw: + df = pd.DataFrame({'A': [1, 2, 3]}) + df + df.assign(B=df.A, C=lambda x:x['A']+ x['B']) -Rolling/Expanding.apply() accepts a ``raw`` keyword to pass a ``Series`` to the function -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. warning:: -:func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, -:func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` have gained a ``raw=None`` parameter. -This is similar to :func:`DataFame.apply`. This parameter, if ``True`` allows one to send a ``np.ndarray`` to the applied function. If ``False`` a ``Series`` will be passed. The -default is ``None``, which preserves backward compatibility, so this will default to ``True``, sending an ``np.ndarray``. -In a future version the default will be changed to ``False``, sending a ``Series``. (:issue:`5071`, :issue:`20584`) + This may subtly change the behavior of your code when you're + using ``.assign()`` to update an existing column. Previously, callables + referring to other variables being updated would get the "old" values -.. ipython:: python + Previous Behavior: - s = pd.Series(np.arange(5), np.arange(5) + 1) - s + .. code-block:: ipython -Pass a ``Series``: + In [2]: df = pd.DataFrame({"A": [1, 2, 3]}) -.. 
ipython:: python + In [3]: df.assign(A=lambda df: df.A + 1, C=lambda df: df.A * -1) + Out[3]: + A C + 0 2 -1 + 1 3 -2 + 2 4 -3 - s.rolling(2, min_periods=1).apply(lambda x: x.iloc[-1], raw=False) + New Behavior: -Mimic the original behavior of passing a ndarray: + .. ipython:: python -.. ipython:: python + df.assign(A=df.A+1, C= lambda df: df.A* -1) - s.rolling(2, min_periods=1).apply(lambda x: x[-1], raw=True) .. _whatsnew_0230.enhancements.merge_on_columns_and_levels: @@ -151,6 +175,194 @@ resetting indexes. See the :ref:`Sorting by Indexes and Values # Sort by 'second' (index) and 'A' (column) df_multi.sort_values(by=['second', 'A']) + +.. _whatsnew_023.enhancements.extension: + +Extending Pandas with Custom Types (Experimental) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pandas now supports storing array-like objects that aren't necessarily 1-D NumPy +arrays as columns in a DataFrame or values in a Series. This allows third-party +libraries to implement extensions to NumPy's types, similar to how pandas +implemented categoricals, datetimes with timezones, periods, and intervals. + +As a demonstration, we'll use cyberpandas_, which provides an ``IPArray`` type +for storing ip addresses. + +.. code-block:: ipython + + In [1]: from cyberpandas import IPArray + + In [2]: values = IPArray([ + ...: 0, + ...: 3232235777, + ...: 42540766452641154071740215577757643572 + ...: ]) + ...: + ...: + +``IPArray`` isn't a normal 1-D NumPy array, but because it's a pandas +:ref:`~pandas.api.extension.ExtensionArray`, it can be stored properly inside pandas' containers. + +.. code-block:: ipython + + In [3]: ser = pd.Series(values) + + In [4]: ser + Out[4]: + 0 0.0.0.0 + 1 192.168.1.1 + 2 2001:db8:85a3::8a2e:370:7334 + dtype: ip + +Notice that the dtype is ``ip``. The missing value semantics of the underlying +array are respected: + +.. 
code-block:: ipython + + In [5]: ser.isna() + Out[5]: + 0 True + 1 False + 2 False + dtype: bool + +For more, see the :ref:`extension types <extending.extension-types>` +documentation. If you build an extension array, publicize it on our +:ref:`ecosystem page <ecosystem.extensions>`. + +.. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest/ + + +.. _whatsnew_0230.enhancements.categorical_grouping: + +New ``observed`` keyword for excluding unobserved categories in ``groupby`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Grouping by a categorical includes the unobserved categories in the output. +When grouping by multiple categorical columns, this means you get the cartesian product of all the +categories, including combinations where there are no observations, which can result in a large +number of groups. We have added a keyword ``observed`` to control this behavior, it defaults to +``observed=False`` for backward-compatiblity. (:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`, :issue:`20902`) + +.. ipython:: python + + cat1 = pd.Categorical(["a", "a", "b", "b"], + categories=["a", "b", "z"], ordered=True) + cat2 = pd.Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = pd.DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) + df['C'] = ['foo', 'bar'] * 2 + df + +To show all values, the previous behavior: + +.. ipython:: python + + df.groupby(['A', 'B', 'C'], observed=False).count() + + +To show only observed values: + +.. ipython:: python + + df.groupby(['A', 'B', 'C'], observed=True).count() + +For pivotting operations, this behavior is *already* controlled by the ``dropna`` keyword: + +.. 
ipython:: python + + cat1 = pd.Categorical(["a", "a", "b", "b"], + categories=["a", "b", "z"], ordered=True) + cat2 = pd.Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) + df + +.. ipython:: python + + pd.pivot_table(df, values='values', index=['A', 'B'], + dropna=True) + pd.pivot_table(df, values='values', index=['A', 'B'], + dropna=False) + + +.. _whatsnew_0230.enhancements.window_raw: + +Rolling/Expanding.apply() accepts ``raw=False`` to pass a ``Series`` to the function +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, +:func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` have gained a ``raw=None`` parameter. +This is similar to :func:`DataFame.apply`. This parameter, if ``True`` allows one to send a ``np.ndarray`` to the applied function. If ``False`` a ``Series`` will be passed. The +default is ``None``, which preserves backward compatibility, so this will default to ``True``, sending an ``np.ndarray``. +In a future version the default will be changed to ``False``, sending a ``Series``. (:issue:`5071`, :issue:`20584`) + +.. ipython:: python + + s = pd.Series(np.arange(5), np.arange(5) + 1) + s + +Pass a ``Series``: + +.. ipython:: python + + s.rolling(2, min_periods=1).apply(lambda x: x.iloc[-1], raw=False) + +Mimic the original behavior of passing a ndarray: + +.. ipython:: python + + s.rolling(2, min_periods=1).apply(lambda x: x[-1], raw=True) + + +.. 
_whatsnew_0210.enhancements.limit_area: + +``DataFrame.interpolate`` has gained the ``limit_area`` kwarg +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:meth:`DataFrame.interpolate` has gained a ``limit_area`` parameter to allow further control of which ``NaN`` s are replaced. +Use ``limit_area='inside'`` to fill only NaNs surrounded by valid values or use ``limit_area='outside'`` to fill only ``NaN`` s +outside the existing valid values while preserving those inside. (:issue:`16284`) See the :ref:`full documentation here <missing_data.interp_limits>`. + + +.. ipython:: python + + ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13, np.nan, np.nan]) + ser + +Fill one consecutive inside value in both directions + +.. ipython:: python + + ser.interpolate(limit_direction='both', limit_area='inside', limit=1) + +Fill all consecutive outside values backward + +.. ipython:: python + + ser.interpolate(limit_direction='backward', limit_area='outside') + +Fill all consecutive outside values in both directions + +.. ipython:: python + + ser.interpolate(limit_direction='both', limit_area='outside') + +.. _whatsnew_0210.enhancements.get_dummies_dtype: + +``get_dummies`` now supports ``dtype`` argument +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :func:`get_dummies` now accepts a ``dtype`` argument, which specifies a dtype for the new columns. The default remains uint8. (:issue:`18330`) + +.. ipython:: python + + df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]}) + pd.get_dummies(df, columns=['c']).dtypes + pd.get_dummies(df, columns=['c'], dtype=bool).dtypes + + .. 
_whatsnew_0230.enhancements.timedelta_mod: Timedelta mod method @@ -227,86 +439,6 @@ These bugs were squashed: - Bug in :meth:`Series.rank` and :meth:`DataFrame.rank` when ``ascending='False'`` failed to return correct ranks for infinity if ``NaN`` were present (:issue:`19538`) - Bug in :func:`DataFrameGroupBy.rank` where ranks were incorrect when both infinity and ``NaN`` were present (:issue:`20561`) -.. _whatsnew_0230.enhancements.round-trippable_json: - -JSON read/write round-trippable with ``orient='table'`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A ``DataFrame`` can now be written to and subsequently read back via JSON while preserving metadata through usage of the ``orient='table'`` argument (see :issue:`18912` and :issue:`9146`). Previously, none of the available ``orient`` values guaranteed the preservation of dtypes and index names, amongst other metadata. - -.. ipython:: python - - df = pd.DataFrame({'foo': [1, 2, 3, 4], - 'bar': ['a', 'b', 'c', 'd'], - 'baz': pd.date_range('2018-01-01', freq='d', periods=4), - 'qux': pd.Categorical(['a', 'b', 'c', 'c']) - }, index=pd.Index(range(4), name='idx')) - df - df.dtypes - df.to_json('test.json', orient='table') - new_df = pd.read_json('test.json', orient='table') - new_df - new_df.dtypes - -Please note that the string `index` is not supported with the round trip format, as it is used by default in ``write_json`` to indicate a missing index name. - -.. ipython:: python - :okwarning: - - df.index.name = 'index' - - df.to_json('test.json', orient='table') - new_df = pd.read_json('test.json', orient='table') - new_df - new_df.dtypes - -.. ipython:: python - :suppress: - - import os - os.remove('test.json') - - -.. 
_whatsnew_0230.enhancements.assign_dependent: - - -``.assign()`` accepts dependent arguments -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The :func:`DataFrame.assign` now accepts dependent keyword arguments for python version later than 3.6 (see also `PEP 468 -<https://www.python.org/dev/peps/pep-0468/>`_). Later keyword arguments may now refer to earlier ones if the argument is a callable. See the -:ref:`documentation here <dsintro.chained_assignment>` (:issue:`14207`) - -.. ipython:: python - - df = pd.DataFrame({'A': [1, 2, 3]}) - df - df.assign(B=df.A, C=lambda x:x['A']+ x['B']) - -.. warning:: - - This may subtly change the behavior of your code when you're - using ``.assign()`` to update an existing column. Previously, callables - referring to other variables being updated would get the "old" values - - Previous Behavior: - - .. code-block:: ipython - - In [2]: df = pd.DataFrame({"A": [1, 2, 3]}) - - In [3]: df.assign(A=lambda df: df.A + 1, C=lambda df: df.A * -1) - Out[3]: - A C - 0 2 -1 - 1 3 -2 - 2 4 -3 - - New Behavior: - - .. ipython:: python - - df.assign(A=df.A+1, C= lambda df: df.A* -1) .. _whatsnew_0230.enhancements.str_cat_align: @@ -358,116 +490,6 @@ Supplying a ``CategoricalDtype`` will make the categories in each column consist df['A'].dtype df['B'].dtype -.. _whatsnew_023.enhancements.extension: - -Extending Pandas with Custom Types (Experimental) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Pandas now supports storing array-like objects that aren't necessarily 1-D NumPy -arrays as columns in a DataFrame or values in a Series. This allows third-party -libraries to implement extensions to NumPy's types, similar to how pandas -implemented categoricals, datetimes with timezones, periods, and intervals. - -As a demonstration, we'll use cyberpandas_, which provides an ``IPArray`` type -for storing ip addresses. - -.. 
code-block:: ipython - - In [1]: from cyberpandas import IPArray - - In [2]: values = IPArray([ - ...: 0, - ...: 3232235777, - ...: 42540766452641154071740215577757643572 - ...: ]) - ...: - ...: - -``IPArray`` isn't a normal 1-D NumPy array, but because it's a pandas -:ref:`~pandas.api.extension.ExtensionArray`, it can be stored properly inside pandas' containers. - -.. code-block:: ipython - - In [3]: ser = pd.Series(values) - - In [4]: ser - Out[4]: - 0 0.0.0.0 - 1 192.168.1.1 - 2 2001:db8:85a3::8a2e:370:7334 - dtype: ip - -Notice that the dtype is ``ip``. The missing value semantics of the underlying -array are respected: - -.. code-block:: ipython - - In [5]: ser.isna() - Out[5]: - 0 True - 1 False - 2 False - dtype: bool - -For more, see the :ref:`extension types <extending.extension-types>` -documentation. If you build an extension array, publicize it on our -:ref:`ecosystem page <ecosystem.extensions>`. - -.. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest/ - -.. _whatsnew_0230.enhancements.categorical_grouping: - -Categorical Groupers has gained an observed keyword -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Grouping by a categorical includes the unobserved categories in the output. -When grouping with multiple groupers, this means you get the cartesian product of all the -categories, including combinations where there are no observations, which can result in a large -number of groupers. We have added a keyword ``observed`` to control this behavior, it defaults to -``observed=False`` for backward-compatiblity. (:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`, :issue:`20902`) - - -.. 
ipython:: python - - cat1 = pd.Categorical(["a", "a", "b", "b"], - categories=["a", "b", "z"], ordered=True) - cat2 = pd.Categorical(["c", "d", "c", "d"], - categories=["c", "d", "y"], ordered=True) - df = pd.DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) - df['C'] = ['foo', 'bar'] * 2 - df - -To show all values, the previous behavior: - -.. ipython:: python - - df.groupby(['A', 'B', 'C'], observed=False).count() - - -To show only observed values: - -.. ipython:: python - - df.groupby(['A', 'B', 'C'], observed=True).count() - -For pivotting operations, this behavior is *already* controlled by the ``dropna`` keyword: - -.. ipython:: python - - cat1 = pd.Categorical(["a", "a", "b", "b"], - categories=["a", "b", "z"], ordered=True) - cat2 = pd.Categorical(["c", "d", "c", "d"], - categories=["c", "d", "y"], ordered=True) - df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) - df - -.. ipython:: python - - pd.pivot_table(df, values='values', index=['A', 'B'], - dropna=True) - pd.pivot_table(df, values='values', index=['A', 'B'], - dropna=False) - .. _whatsnew_0230.enhancements.other: @@ -519,7 +541,7 @@ Other Enhancements - :func:`read_html` now reads all ``<tbody>`` elements in a ``<table>``, not just the first. (:issue:`20690`) - :meth:`~pandas.core.window.Rolling.quantile` and :meth:`~pandas.core.window.Expanding.quantile` now accept the ``interpolation`` keyword, ``linear`` by default (:issue:`20497`) - zip compression is supported via ``compression=zip`` in :func:`DataFrame.to_pickle`, :func:`Series.to_pickle`, :func:`DataFrame.to_csv`, :func:`Series.to_csv`, :func:`DataFrame.to_json`, :func:`Series.to_json`. (:issue:`17778`) -- :class:`pandas.tseries.api.offsets.WeekOfMonth` constructor now supports ``n=0`` (:issue:`20517`). +- :class:`~pandas.tseries.offsets.WeekOfMonth` constructor now supports ``n=0`` (:issue:`20517`). 
- :class:`DataFrame` and :class:`Series` now support matrix multiplication (``@``) operator (:issue:`10259`) for Python>=3.5 - Updated :meth:`DataFrame.to_gbq` and :meth:`pandas.read_gbq` signature and documentation to reflect changes from the Pandas-GBQ library version 0.4.0. Adds intersphinx mapping to Pandas-GBQ
@TomAugspurger in case you didn't do this yet
https://api.github.com/repos/pandas-dev/pandas/pulls/21061
2018-05-15T15:31:46Z
2018-05-15T20:05:26Z
2018-05-15T20:05:26Z
2018-05-15T20:05:26Z
Add epoch alternative to deprecation message
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 248c648c33db3..d17d4e7139d72 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1196,6 +1196,9 @@ class Timedelta(_Timedelta): msg = textwrap.dedent("""\ Floor division between integer array and Timedelta is deprecated. Use 'array // timedelta.value' instead. + If you want to obtain epochs from an array of timestamps, + you can rather use + 'array - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")'. """) warnings.warn(msg, FutureWarning) return other // self.value
cfr https://github.com/pandas-dev/pandas/pull/21036#issuecomment-389162167
https://api.github.com/repos/pandas-dev/pandas/pulls/21060
2018-05-15T15:24:59Z
2018-05-15T18:09:49Z
2018-05-15T18:09:49Z
2018-05-15T18:11:18Z
0.22.x
diff --git a/.gitignore b/.gitignore index ff0a6aef47163..b1748ae72b8ba 100644 --- a/.gitignore +++ b/.gitignore @@ -106,3 +106,4 @@ doc/build/html/index.html doc/tmp.sv doc/source/styled.xlsx doc/source/templates/ +env/ diff --git a/.travis.yml b/.travis.yml index fe1a2950dbf08..42b4ef0396fc8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -102,8 +102,6 @@ before_install: - uname -a - git --version - git tag - - ci/before_install_travis.sh - - export DISPLAY=":99.0" install: - echo "install start" @@ -114,6 +112,8 @@ install: before_script: - ci/install_db_travis.sh + - export DISPLAY=":99.0" + - ci/before_script_travis.sh script: - echo "script start" diff --git a/appveyor.yml b/appveyor.yml index a1f8886f6d068..44af73b498aa8 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -22,7 +22,7 @@ environment: PYTHON_VERSION: "3.6" PYTHON_ARCH: "64" CONDA_PY: "36" - CONDA_NPY: "112" + CONDA_NPY: "113" - CONDA_ROOT: "C:\\Miniconda3_64" PYTHON_VERSION: "2.7" diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py index dda684b35e301..16889b2f19e89 100644 --- a/asv_bench/benchmarks/plotting.py +++ b/asv_bench/benchmarks/plotting.py @@ -10,15 +10,37 @@ def date_range(start=None, end=None, periods=None, freq=None): from pandas.tools.plotting import andrews_curves +class Plotting(object): + goal_time = 0.2 + + def setup(self): + import matplotlib + matplotlib.use('Agg') + self.s = Series(np.random.randn(1000000)) + self.df = DataFrame({'col': self.s}) + + def time_series_plot(self): + self.s.plot() + + def time_frame_plot(self): + self.df.plot() + + class TimeseriesPlotting(object): goal_time = 0.2 def setup(self): import matplotlib matplotlib.use('Agg') - self.N = 2000 - self.M = 5 - self.df = DataFrame(np.random.randn(self.N, self.M), index=date_range('1/1/1975', periods=self.N)) + N = 2000 + M = 5 + idx = date_range('1/1/1975', periods=N) + self.df = DataFrame(np.random.randn(N, M), index=idx) + + idx_irregular = 
pd.DatetimeIndex(np.concatenate((idx.values[0:10], + idx.values[12:]))) + self.df2 = DataFrame(np.random.randn(len(idx_irregular), M), + index=idx_irregular) def time_plot_regular(self): self.df.plot() @@ -26,6 +48,9 @@ def time_plot_regular(self): def time_plot_regular_compat(self): self.df.plot(x_compat=True) + def time_plot_irregular(self): + self.df2.plot() + class Misc(object): goal_time = 0.6 diff --git a/ci/before_install_travis.sh b/ci/before_script_travis.sh similarity index 93% rename from ci/before_install_travis.sh rename to ci/before_script_travis.sh index 2d0b4da6120dc..0b3939b1906a2 100755 --- a/ci/before_install_travis.sh +++ b/ci/before_script_travis.sh @@ -4,6 +4,7 @@ echo "inside $0" if [ "${TRAVIS_OS_NAME}" == "linux" ]; then sh -e /etc/init.d/xvfb start + sleep 3 fi # Never fail because bad things happened here. diff --git a/ci/check_imports.py b/ci/check_imports.py index a83436e7d258c..d6f24ebcc4d3e 100644 --- a/ci/check_imports.py +++ b/ci/check_imports.py @@ -9,7 +9,6 @@ 'ipython', 'jinja2' 'lxml', - 'matplotlib', 'numexpr', 'openpyxl', 'py', diff --git a/ci/environment-dev.yaml b/ci/environment-dev.yaml new file mode 100644 index 0000000000000..c3d3d59f895c6 --- /dev/null +++ b/ci/environment-dev.yaml @@ -0,0 +1,14 @@ +name: pandas-dev +channels: + - defaults + - conda-forge +dependencies: + - Cython + - NumPy + - moto + - pytest + - python-dateutil + - python=3 + - pytz + - setuptools + - sphinx diff --git a/ci/install_travis.sh b/ci/install_travis.sh index b85263daa1eac..dac3625cba4ba 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -34,9 +34,9 @@ fi # install miniconda if [ "${TRAVIS_OS_NAME}" == "osx" ]; then - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1 + time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -q -O miniconda.sh || exit 1 else - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O 
miniconda.sh || exit 1 + time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -q -O miniconda.sh || exit 1 fi time bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1 @@ -107,7 +107,7 @@ time conda install -n pandas pytest>=3.1.0 time pip install pytest-xdist moto if [ "$LINT" ]; then - conda install flake8 + conda install flake8=3.4.1 pip install cpplint fi diff --git a/ci/requirements-2.7_BUILD_TEST.pip b/ci/requirements-2.7_BUILD_TEST.pip index a0fc77c40bc00..f4617133cad5b 100644 --- a/ci/requirements-2.7_BUILD_TEST.pip +++ b/ci/requirements-2.7_BUILD_TEST.pip @@ -1,7 +1,6 @@ xarray geopandas seaborn -pandas_gbq pandas_datareader statsmodels scikit-learn diff --git a/ci/requirements-3.5.pip b/ci/requirements-3.5.pip index 6e4f7b65f9728..c9565f2173070 100644 --- a/ci/requirements-3.5.pip +++ b/ci/requirements-3.5.pip @@ -1,2 +1,2 @@ xarray==0.9.1 -pandas-gbq +pandas_gbq diff --git a/ci/requirements-3.6.sh b/ci/requirements-3.6.sh new file mode 100644 index 0000000000000..f5c3dbf59a29d --- /dev/null +++ b/ci/requirements-3.6.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +source activate pandas + +echo "[install 3.6 downstream deps]" + +conda install -n pandas -c conda-forge pandas-datareader xarray geopandas seaborn statsmodels scikit-learn dask diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run index af7a90b126f22..db2d429a2a4ff 100644 --- a/ci/requirements-3.6_WIN.run +++ b/ci/requirements-3.6_WIN.run @@ -1,6 +1,6 @@ python-dateutil pytz -numpy=1.12* +numpy=1.13* bottleneck openpyxl xlsxwriter diff --git a/ci/requirements_all.txt b/ci/requirements-optional-conda.txt similarity index 68% rename from ci/requirements_all.txt rename to ci/requirements-optional-conda.txt index e13afd619f105..6edb8d17337e4 100644 --- a/ci/requirements_all.txt +++ b/ci/requirements-optional-conda.txt @@ -1,28 +1,27 @@ -pytest>=3.1.0 -pytest-cov -pytest-xdist -flake8 -sphinx=1.5* -nbsphinx -ipython -python-dateutil -pytz -openpyxl -xlsxwriter -xlrd -xlwt 
-html5lib -patsy beautifulsoup4 -numpy -cython -scipy +blosc +bottleneck +fastparquet +feather-format +html5lib +ipython +ipykernel +jinja2 +lxml +matplotlib +nbsphinx numexpr +openpyxl +pyarrow +pymysql pytables -matplotlib +pytest-cov +pytest-xdist +s3fs +scipy seaborn -lxml sqlalchemy -bottleneck -pymysql -Jinja2 +xarray +xlrd +xlsxwriter +xlwt diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt new file mode 100644 index 0000000000000..06b22bd8f2c63 --- /dev/null +++ b/ci/requirements-optional-pip.txt @@ -0,0 +1,27 @@ +# This file was autogenerated by scripts/convert_deps.py +# Do not modify directlybeautifulsoup4 +blosc +bottleneck +fastparquet +feather-format +html5lib +ipython +jinja2 +lxml +matplotlib +nbsphinx +numexpr +openpyxl +pyarrow +pymysql +tables +pytest-cov +pytest-xdist +s3fs +scipy +seaborn +sqlalchemy +xarray +xlrd +xlsxwriter +xlwt \ No newline at end of file diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt index dbc4f6cbd6509..2fb36b7cd70d8 100644 --- a/ci/requirements_dev.txt +++ b/ci/requirements_dev.txt @@ -1,8 +1,10 @@ +# This file was autogenerated by scripts/convert_deps.py +# Do not modify directly +Cython +NumPy +moto +pytest python-dateutil pytz -numpy -cython -pytest>=3.1.0 -pytest-cov -flake8 -moto +setuptools +sphinx \ No newline at end of file diff --git a/ci/script_multi.sh b/ci/script_multi.sh index ee9fbcaad5ef5..ae8f030b92d66 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e echo "[script multi]" diff --git a/doc/source/api.rst b/doc/source/api.rst index 80f8d42be8ed6..a9766b5c04496 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1794,6 +1794,7 @@ Methods Timestamp.strftime Timestamp.strptime Timestamp.time + Timestamp.timestamp Timestamp.timetuple Timestamp.timetz Timestamp.to_datetime64 @@ -2173,6 +2174,17 @@ Style Export and Import Styler.export Styler.use +Plotting +~~~~~~~~ + +.. currentmodule:: pandas + +.. 
autosummary:: + :toctree: generated/ + + plotting.register_matplotlib_converters + plotting.deregister_matplotlib_converters + .. currentmodule:: pandas General utility functions diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 466ac3c9cbf51..cd3cc282a8010 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -346,7 +346,9 @@ The following methods are available: :meth:`~Window.sum`, Sum of values :meth:`~Window.mean`, Mean of values -The weights used in the window are specified by the ``win_type`` keyword. The list of recognized types are: +The weights used in the window are specified by the ``win_type`` keyword. +The list of recognized types are the `scipy.signal window functions +<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__: - ``boxcar`` - ``triang`` diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index d8d57a8bfffdd..4426d3fb0165e 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -11,32 +11,32 @@ Where to start? =============== All contributions, bug reports, bug fixes, documentation improvements, -enhancements and ideas are welcome. +enhancements, and ideas are welcome. -If you are simply looking to start working with the *pandas* codebase, navigate to the -`GitHub "issues" tab <https://github.com/pandas-dev/pandas/issues>`_ and start looking through -interesting issues. There are a number of issues listed under `Docs +If you are brand new to pandas or open-source development, we recommend going +through the `GitHub "issues" tab <https://github.com/pandas-dev/pandas/issues>`_ +to find issues that interest you. There are a number of issues listed under `Docs <https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_ and `Difficulty Novice <https://github.com/pandas-dev/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_ -where you could start out. 
- -Or maybe through using *pandas* you have an idea of your own or are looking for something -in the documentation and thinking 'this can be improved'...you can do something -about it! +where you could start out. Once you've found an interesting issue, you can +return here to get your development environment setup. Feel free to ask questions on the `mailing list -<https://groups.google.com/forum/?fromgroups#!forum/pydata>`_ or on `Gitter -<https://gitter.im/pydata/pandas>`_. +<https://groups.google.com/forum/?fromgroups#!forum/pydata>`_ or on `Gitter`_. + +.. _contributing.bug_reports: Bug reports and enhancement requests ==================================== -Bug reports are an important part of making *pandas* more stable. Having a complete bug report -will allow others to reproduce the bug and provide insight into fixing. Because many versions of -*pandas* are supported, knowing version information will also identify improvements made since -previous versions. Trying the bug-producing code out on the *master* branch is often a worthwhile exercise -to confirm the bug still exists. It is also worth searching existing bug reports and pull requests +Bug reports are an important part of making *pandas* more stable. Having a complete bug report +will allow others to reproduce the bug and provide insight into fixing. See +`this stackoverflow article <https://stackoverflow.com/help/mcve>`_ for tips on +writing a good bug report. + +Trying the bug-producing code out on the *master* branch is often a worthwhile exercise +to confirm the bug still exists. It is also worth searching existing bug reports and pull requests to see if the issue has already been reported and/or fixed. Bug reports must: @@ -60,12 +60,16 @@ Bug reports must: The issue will then show up to the *pandas* community and be open to comments/ideas from others. +.. 
_contributing.github + Working with the code ===================== Now that you have an issue you want to fix, enhancement to add, or documentation to improve, you need to learn how to work with GitHub and the *pandas* code base. +.. _contributing.version_control: + Version control, Git, and GitHub -------------------------------- @@ -103,167 +107,164 @@ want to clone your fork to your machine:: git clone https://github.com/your-user-name/pandas.git pandas-yourname cd pandas-yourname - git remote add upstream git://github.com/pandas-dev/pandas.git + git remote add upstream https://github.com/pandas-dev/pandas.git This creates the directory `pandas-yourname` and connects your repository to the upstream (main project) *pandas* repository. -Creating a branch ------------------ +.. _contributing.dev_env: -You want your master branch to reflect only production-ready code, so create a -feature branch for making your changes. For example:: +Creating a development environment +---------------------------------- - git branch shiny-new-feature - git checkout shiny-new-feature +To test out code changes, you'll need to build pandas from source, which +requires a C compiler and python environment. If you're making documentation +changes, you can skip to :ref:`contributing.documentation` but you won't be able +to build the documentation locally before pushing your changes. -The above can be simplified to:: +.. _contributiong.dev_c: - git checkout -b shiny-new-feature +Installing a C Complier +~~~~~~~~~~~~~~~~~~~~~~~ -This changes your working directory to the shiny-new-feature branch. Keep any -changes in this branch specific to one bug or feature so it is clear -what the branch brings to *pandas*. You can have many shiny-new-features -and switch in between them using the git checkout command. +Pandas uses C extensions (mostly written using Cython) to speed up certain +operations. 
To install pandas from source, you need to compile these C +extensions, which means you need a C complier. This process depends on which +platform you're using. Follow the `CPython contributing guidelines +<https://docs.python.org/devguide/setup.html#build-dependencies>`_ for getting a +complier installed. You don't need to do any of the ``./configure`` or ``make`` +steps; you only need to install the complier. -To update this branch, you need to retrieve the changes from the master branch:: +For Windows developers, the following links may be helpful. - git fetch upstream - git rebase upstream/master +- https://blogs.msdn.microsoft.com/pythonengineering/2016/04/11/unable-to-find-vcvarsall-bat/ +- https://github.com/conda/conda-recipes/wiki/Building-from-Source-on-Windows-32-bit-and-64-bit +- https://cowboyprogrammer.org/building-python-wheels-for-windows/ +- https://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/ +- https://support.enthought.com/hc/en-us/articles/204469260-Building-Python-extensions-with-Canopy -This will replay your commits on top of the latest pandas git master. If this -leads to merge conflicts, you must resolve these before submitting your pull -request. If you have uncommitted changes, you will need to ``stash`` them prior -to updating. This will effectively store your changes and they can be reapplied -after updating. +Let us know if you have any difficulties by opening an issue or reaching out on +`Gitter`_. -.. _contributing.dev_env: +.. _contributiong.dev_python: -Creating a development environment ----------------------------------- +Creating a Python Environment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -An easy way to create a *pandas* development environment is as follows. 
+Now that you have a C complier, create an isolated pandas development +environment: -- Install either :ref:`Anaconda <install.anaconda>` or :ref:`miniconda <install.miniconda>` +- Install either `Anaconda <https://www.anaconda.com/download/>`_ or `miniconda + <https://conda.io/miniconda.html>`_ +- Make sure your conda is up to date (``conda update conda``) - Make sure that you have :ref:`cloned the repository <contributing.forking>` - ``cd`` to the *pandas* source directory -Tell conda to create a new environment, named ``pandas_dev``, or any other name you would like -for this environment, by running:: - - conda create -n pandas_dev --file ci/requirements_dev.txt - - -For a python 3 environment:: - - conda create -n pandas_dev python=3 --file ci/requirements_dev.txt - -.. warning:: - - If you are on Windows, see :ref:`here for a fully compliant Windows environment <contributing.windows>`. - -This will create the new environment, and not touch any of your existing environments, -nor any existing python installation. It will install all of the basic dependencies of -*pandas*, as well as the development and testing tools. If you would like to install -other dependencies, you can install them as follows:: +We'll now kick off a three-step process: - conda install -n pandas_dev -c pandas pytables scipy +1. Install the build dependencies +2. Build and install pandas +3. Install the optional dependencies -To install *all* pandas dependencies you can do the following:: +.. code-block:: none - conda install -n pandas_dev -c conda-forge --file ci/requirements_all.txt + # Create and activate the build environment + conda env create -f ci/environment-dev.yaml + conda activate pandas-dev -To work in this environment, Windows users should ``activate`` it as follows:: + # Build and install pandas + python setup.py build_ext --inplace -j 4 + python -m pip install -e . 
- activate pandas_dev + # Install the rest of the optional dependencies + conda install -c defaults -c conda-forge --file=ci/requirements-optional-conda.txt -Mac OSX / Linux users should use:: +At this point you should be able to import pandas from your locally built version:: - source activate pandas_dev + $ python # start an interpreter + >>> import pandas + >>> print(pandas.__version__) + 0.22.0.dev0+29.g4ad6d4d74 -You will then see a confirmation message to indicate you are in the new development environment. +This will create the new environment, and not touch any of your existing environments, +nor any existing python installation. To view your environments:: conda info -e -To return to your home root environment in Windows:: - - deactivate +To return to your root environment:: -To return to your home root environment in OSX / Linux:: - - source deactivate + conda deactivate See the full conda docs `here <http://conda.pydata.org/docs>`__. -At this point you can easily do an *in-place* install, as detailed in the next section. - -.. _contributing.windows: - -Creating a Windows development environment ------------------------------------------- +.. _contributing.pip: -To build on Windows, you need to have compilers installed to build the extensions. You will need to install the appropriate Visual Studio compilers, VS 2008 for Python 2.7, VS 2010 for 3.4, and VS 2015 for Python 3.5 and 3.6. +Creating a Python Environment (pip) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -For Python 2.7, you can install the ``mingw`` compiler which will work equivalently to VS 2008:: +If you aren't using conda for you development environment, follow these instructions. +You'll need to have at least python3.5 installed on your system. - conda install -n pandas_dev libpython +.. code-block:: none -or use the `Microsoft Visual Studio VC++ compiler for Python <https://www.microsoft.com/en-us/download/details.aspx?id=44266>`__. 
Note that you have to check the ``x64`` box to install the ``x64`` extension building capability as this is not installed by default. + # Create a virtual environment + # Use an ENV_DIR of your choice. We'll use ~/virtualenvs/pandas-dev + # Any parent directories should already exist + python3 -m venv ~/virtualenvs/pandas-dev + # Activate the virtualenv + . ~/virtualenvs/pandas-dev/bin/activate -For Python 3.4, you can download and install the `Windows 7.1 SDK <https://www.microsoft.com/en-us/download/details.aspx?id=8279>`__. Read the references below as there may be various gotchas during the installation. - -For Python 3.5 and 3.6, you can download and install the `Visual Studio 2015 Community Edition <https://www.visualstudio.com/en-us/downloads/visual-studio-2015-downloads-vs.aspx>`__. - -Here are some references and blogs: - -- https://blogs.msdn.microsoft.com/pythonengineering/2016/04/11/unable-to-find-vcvarsall-bat/ -- https://github.com/conda/conda-recipes/wiki/Building-from-Source-on-Windows-32-bit-and-64-bit -- https://cowboyprogrammer.org/building-python-wheels-for-windows/ -- https://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/ -- https://support.enthought.com/hc/en-us/articles/204469260-Building-Python-extensions-with-Canopy + # Install the build dependencies + python -m pip install -r ci/requirements_dev.txt + # Build and install pandas + python setup.py build_ext --inplace -j 4 + python -m pip install -e . -.. _contributing.getting_source: + # Install additional dependencies + python -m pip install -r ci/requirements-optional-pip.txt -Making changes -------------- +Creating a branch +----------------- -Before making your code changes, it is often necessary to build the code that was -just checked out. There are two primary methods of doing this. +You want your master branch to reflect only production-ready code, so create a +feature branch for making your changes. For example:: -#. 
The best way to develop *pandas* is to build the C extensions in-place by - running:: + git branch shiny-new-feature + git checkout shiny-new-feature - python setup.py build_ext --inplace +The above can be simplified to:: - If you startup the Python interpreter in the *pandas* source directory you - will call the built C extensions + git checkout -b shiny-new-feature -#. Another very common option is to do a ``develop`` install of *pandas*:: +This changes your working directory to the shiny-new-feature branch. Keep any +changes in this branch specific to one bug or feature so it is clear +what the branch brings to *pandas*. You can have many shiny-new-features +and switch in between them using the git checkout command. - python setup.py develop +To update this branch, you need to retrieve the changes from the master branch:: - This makes a symbolic link that tells the Python interpreter to import *pandas* - from your development directory. Thus, you can always be using the development - version on your system without being inside the clone directory. + git fetch upstream + git rebase upstream/master +This will replay your commits on top of the latest pandas git master. If this +leads to merge conflicts, you must resolve these before submitting your pull +request. If you have uncommitted changes, you will need to ``stash`` them prior +to updating. This will effectively store your changes and they can be reapplied +after updating. .. _contributing.documentation: Contributing to the documentation ================================= -If you're not the developer type, contributing to the documentation is still -of huge value. You don't even have to be an expert on -*pandas* to do so! Something as simple as rewriting small passages for clarity -as you reference the docs is a simple but effective way to contribute. The -next person to read that passage will be in your debt! - -In fact, there are sections of the docs that are worse off after being written -by experts. 
If something in the docs doesn't make sense to you, updating the -relevant section after you figure it out is a simple way to ensure it will -help the next person. +If you're not the developer type, contributing to the documentation is still of +huge value. You don't even have to be an expert on *pandas* to do so! In fact, +there are sections of the docs that are worse off after being written by +experts. If something in the docs doesn't make sense to you, updating the +relevant section after you figure it out is a great way to ensure it will help +the next person. .. contents:: Documentation: :local: @@ -330,7 +331,7 @@ The utility script ``scripts/api_rst_coverage.py`` can be used to compare the list of methods documented in ``doc/source/api.rst`` (which is used to generate the `API Reference <http://pandas.pydata.org/pandas-docs/stable/api.html>`_ page) and the actual public methods. -This will identify methods documented in in ``doc/source/api.rst`` that are not actually +This will identify methods documented in ``doc/source/api.rst`` that are not actually class methods, and existing methods that are not documented in ``doc/source/api.rst``. @@ -342,30 +343,6 @@ Requirements First, you need to have a development environment to be able to build pandas (see the docs on :ref:`creating a development environment above <contributing.dev_env>`). -Further, to build the docs, there are some extra requirements: you will need to -have ``sphinx`` and ``ipython`` installed. `numpydoc -<https://github.com/numpy/numpydoc>`_ is used to parse the docstrings that -follow the Numpy Docstring Standard (see above), but you don't need to install -this because a local copy of numpydoc is included in the *pandas* source -code. `nbsphinx <https://nbsphinx.readthedocs.io/>`_ is required to build -the Jupyter notebooks included in the documentation. 
- -If you have a conda environment named ``pandas_dev``, you can install the extra -requirements with:: - - conda install -n pandas_dev sphinx ipython nbconvert nbformat - conda install -n pandas_dev -c conda-forge nbsphinx - -Furthermore, it is recommended to have all :ref:`optional dependencies <install.optional_dependencies>`. -installed. This is not strictly necessary, but be aware that you will see some error -messages when building the docs. This happens because all the code in the documentation -is executed during the doc build, and so code examples using optional dependencies -will generate errors. Run ``pd.show_versions()`` to get an overview of the installed -version of all dependencies. - -.. warning:: - - You need to have ``sphinx`` version >= 1.3.2. Building the documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -386,10 +363,10 @@ If you want to do a full clean build, do:: python make.py clean python make.py html -Starting with *pandas* 0.13.1 you can tell ``make.py`` to compile only a single section -of the docs, greatly reducing the turn-around time for checking your changes. -You will be prompted to delete ``.rst`` files that aren't required. This is okay because -the prior versions of these files can be checked out from git. However, you must make sure +You can tell ``make.py`` to compile only a single section of the docs, greatly +reducing the turn-around time for checking your changes. You will be prompted to +delete ``.rst`` files that aren't required. This is okay because the prior +versions of these files can be checked out from git. However, you must make sure not to commit the file deletions to your Git repository! :: @@ -422,6 +399,8 @@ the documentation are also built by Travis-CI. These docs are then hosted `here <http://pandas-docs.github.io/pandas-docs-travis>`__, see also the :ref:`Continuous Integration <contributing.ci>` section. +.. 
_contributing.code: + Contributing to the code base ============================= @@ -480,7 +459,7 @@ Once configured, you can run the tool as follows:: clang-format modified-c-file This will output what your file will look like if the changes are made, and to apply -them, just run the following command:: +them, run the following command:: clang-format -i modified-c-file @@ -1033,7 +1012,7 @@ delete your branch:: git checkout master git merge upstream/master -Then you can just do:: +Then you can do:: git branch -d shiny-new-feature @@ -1043,3 +1022,6 @@ branch has not actually been merged. The branch will still exist on GitHub, so to delete it there do:: git push origin --delete shiny-new-feature + + +.. _Gitter: https://gitter.im/pydata/pandas diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 2348a3d10c54f..69913b2c1fbd8 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -53,6 +53,18 @@ the latest web technologies. Its goal is to provide elegant, concise constructio graphics in the style of Protovis/D3, while delivering high-performance interactivity over large data to thin clients. +`seaborn <https://seaborn.pydata.org>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Seaborn is a Python visualization library based on `matplotlib +<http://matplotlib.org>`__. It provides a high-level, dataset-oriented +interface for creating attractive statistical graphics. The plotting functions +in seaborn understand pandas objects and leverage pandas grouping operations +internally to support concise specification of complex visualizations. Seaborn +also goes beyond matplotlib and pandas with the option to perform statistical +estimation while plotting, aggregating across observations and visualizing the +fit of statistical models to emphasize patterns in a dataset. 
+ `yhat/ggplot <https://github.com/yhat/ggplot>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -64,15 +76,6 @@ but a faithful implementation for python users has long been missing. Although s (as of Jan-2014), the `yhat/ggplot <https://github.com/yhat/ggplot>`__ project has been progressing quickly in that direction. -`Seaborn <https://github.com/mwaskom/seaborn>`__ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Although pandas has quite a bit of "just plot it" functionality built-in, visualization and -in particular statistical graphics is a vast field with a long tradition and lots of ground -to cover. The `Seaborn <https://github.com/mwaskom/seaborn>`__ project builds on top of pandas -and `matplotlib <http://matplotlib.org>`__ to provide easy plotting of data which extends to -more advanced types of plots then those offered by pandas. - `Vincent <https://github.com/wrobstory/vincent>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -222,7 +225,13 @@ Out-of-core ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dask is a flexible parallel computing library for analytics. Dask -allow a familiar ``DataFrame`` interface to out-of-core, parallel and distributed computing. +provides a familiar ``DataFrame`` interface for out-of-core, parallel and distributed computing. + +`Dask-ML <https://dask-ml.readthedocs.io/en/latest/>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Dask-ML enables parallel and distributed machine learning using Dask alongside existing machine learning libraries like Scikit-Learn, XGBoost, and TensorFlow. + `Blaze <http://blaze.pydata.org/>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index fdb002a642d62..b329fac969343 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -1835,15 +1835,27 @@ that you've done this: Yikes! +.. 
_indexing.evaluation_order: + Evaluation order matters ~~~~~~~~~~~~~~~~~~~~~~~~ -Furthermore, in chained expressions, the order may determine whether a copy is returned or not. -If an expression will set values on a copy of a slice, then a ``SettingWithCopy`` -warning will be issued. +When you use chained indexing, the order and type of the indexing operation +partially determine whether the result is a slice into the original object, or +a copy of the slice. + +Pandas has the ``SettingWithCopyWarning`` because assigning to a copy of a +slice is frequently not intentional, but a mistake caused by chained indexing +returning a copy where a slice was expected. + +If you would like pandas to be more or less trusting about assignment to a +chained indexing expression, you can set the :ref:`option <options>` +``mode.chained_assignment`` to one of these values: -You can control the action of a chained assignment via the option ``mode.chained_assignment``, -which can take the values ``['raise','warn',None]``, where showing a warning is the default. +* ``'warn'``, the default, means a ``SettingWithCopyWarning`` is printed. +* ``'raise'`` means pandas will raise a ``SettingWithCopyException`` + you have to deal with. +* ``None`` will suppress the warnings entirely. .. ipython:: python :okwarning: diff --git a/doc/source/install.rst b/doc/source/install.rst index c805f84d0faaa..27dde005e5a87 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -141,28 +141,24 @@ and can take a few minutes to complete. Installing using your Linux distribution's package manager. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The commands in this table will install pandas for Python 2 from your distribution. -To install pandas for Python 3 you may need to use the package ``python3-pandas``. +The commands in this table will install pandas for Python 3 from your distribution. +To install pandas for Python 2 you may need to use the package ``python-pandas``. .. 
csv-table:: :header: "Distribution", "Status", "Download / Repository Link", "Install method" :widths: 10, 10, 20, 50 - Debian, stable, `official Debian repository <http://packages.debian.org/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python-pandas`` - Debian & Ubuntu, unstable (latest packages), `NeuroDebian <http://neuro.debian.net/index.html#how-to-use-this-repository>`__ , ``sudo apt-get install python-pandas`` - Ubuntu, stable, `official Ubuntu repository <http://packages.ubuntu.com/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python-pandas`` - Ubuntu, unstable (daily builds), `PythonXY PPA <https://code.launchpad.net/~pythonxy/+archive/pythonxy-devel>`__; activate by: ``sudo add-apt-repository ppa:pythonxy/pythonxy-devel && sudo apt-get update``, ``sudo apt-get install python-pandas`` - OpenSuse, stable, `OpenSuse Repository <http://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper in python-pandas`` - Fedora, stable, `official Fedora repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``dnf install python-pandas`` - Centos/RHEL, stable, `EPEL repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``yum install python-pandas`` - - - - - - + Debian, stable, `official Debian repository <http://packages.debian.org/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python3-pandas`` + Debian & Ubuntu, unstable (latest packages), `NeuroDebian <http://neuro.debian.net/index.html#how-to-use-this-repository>`__ , ``sudo apt-get install python3-pandas`` + Ubuntu, stable, `official Ubuntu repository <http://packages.ubuntu.com/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python3-pandas`` + OpenSuse, stable, `OpenSuse Repository <http://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper 
in python3-pandas`` + Fedora, stable, `official Fedora repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``dnf install python3-pandas`` + Centos/RHEL, stable, `EPEL repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``yum install python3-pandas`` +**However**, the packages in the linux package managers are often a few versions behind, so +to get the newest version of pandas, it's recommended to install using the ``pip`` or ``conda`` +methods described above. Installing from source @@ -258,7 +254,8 @@ Optional Dependencies <http://www.vergenet.net/~conrad/software/xsel/>`__, or `xclip <https://github.com/astrand/xclip/>`__: necessary to use :func:`~pandas.read_clipboard`. Most package managers on Linux distributions will have ``xclip`` and/or ``xsel`` immediately available for installation. -* For Google BigQuery I/O - see `here <https://pandas-gbq.readthedocs.io/en/latest/install.html#dependencies>`__ +* `pandas-gbq <https://pandas-gbq.readthedocs.io/en/latest/install.html#dependencies>`__: for Google BigQuery I/O. + * `Backports.lzma <https://pypi.python.org/pypi/backports.lzma/>`__: Only for Python 2, for writing to and/or reading from an xz compressed DataFrame in CSV; Python 3 support is built into the standard library. * One of the following combinations of libraries is needed to use the diff --git a/doc/source/io.rst b/doc/source/io.rst index 82cb83c168b22..ba33c449e701f 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -103,15 +103,20 @@ Column and Index Locations and Names ++++++++++++++++++++++++++++++++++++ header : int or list of ints, default ``'infer'`` - Row number(s) to use as the column names, and the start of the data. Default - behavior is as if ``header=0`` if no ``names`` passed, otherwise as if - ``header=None``. Explicitly pass ``header=0`` to be able to replace existing - names. 
The header can be a list of ints that specify row locations for a - multi-index on the columns e.g. ``[0,1,3]``. Intervening rows that are not - specified will be skipped (e.g. 2 in this example is skipped). Note that - this parameter ignores commented lines and empty lines if - ``skip_blank_lines=True``, so header=0 denotes the first line of data - rather than the first line of the file. + Row number(s) to use as the column names, and the start of the + data. Default behavior is to infer the column names: if no names are + passed the behavior is identical to ``header=0`` and column names + are inferred from the first line of the file, if column names are + passed explicitly then the behavior is identical to + ``header=None``. Explicitly pass ``header=0`` to be able to replace + existing names. + + The header can be a list of ints that specify row locations + for a multi-index on the columns e.g. ``[0,1,3]``. Intervening rows + that are not specified will be skipped (e.g. 2 in this example is + skipped). Note that this parameter ignores commented lines and empty + lines if ``skip_blank_lines=True``, so header=0 denotes the first + line of data rather than the first line of the file. names : array-like, default ``None`` List of column names to use. If file contains no header row, then you should explicitly pass ``header=None``. Duplicates in this list will cause @@ -553,6 +558,14 @@ If the header is in a row other than the first, pass the row number to data = 'skip this skip it\na,b,c\n1,2,3\n4,5,6\n7,8,9' pd.read_csv(StringIO(data), header=1) +.. note:: + + Default behavior is to infer the column names: if no names are + passed the behavior is identical to ``header=0`` and column names + are inferred from the first nonblank line of the file, if column + names are passed explicitly then the behavior is identical to + ``header=None``. + .. _io.dupe_names: Duplicate names parsing @@ -4469,8 +4482,10 @@ Several caveats. 
- This is a newer library, and the format, though stable, is not guaranteed to be backward compatible to the earlier versions. -- The format will NOT write an ``Index``, or ``MultiIndex`` for the ``DataFrame`` and will raise an - error if a non-default one is provided. You can simply ``.reset_index()`` in order to store the index. +- The format will NOT write an ``Index``, or ``MultiIndex`` for the + ``DataFrame`` and will raise an error if a non-default one is provided. You + can ``.reset_index()`` to store the index or ``.reset_index(drop=True)`` to + ignore it. - Duplicate column names and non-string columns names are not supported - Non supported types include ``Period`` and actual python object types. These will raise a helpful error message on an attempt at serialization. @@ -4533,9 +4548,8 @@ dtypes, including extension dtypes such as datetime with tz. Several caveats. -- The format will NOT write an ``Index``, or ``MultiIndex`` for the ``DataFrame`` and will raise an - error if a non-default one is provided. You can simply ``.reset_index(drop=True)`` in order to store the index. - Duplicate column names and non-string columns names are not supported +- Index level names, if specified, must be strings - Categorical dtypes can be serialized to parquet, but will de-serialize as ``object`` dtype. - Non supported types include ``Period`` and actual python object types. These will raise a helpful error message on an attempt at serialization. @@ -4580,6 +4594,15 @@ Read from a parquet file. result.dtypes +Read only certain columns of a parquet file. + +.. ipython:: python + + result = pd.read_parquet('example_fp.parquet', engine='fastparquet', columns=['a', 'b']) + + result.dtypes + + .. ipython:: python :suppress: diff --git a/doc/source/options.rst b/doc/source/options.rst index 2da55a5a658a4..505a5ade68de0 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -273,164 +273,167 @@ Options are 'right', and 'left'. 
Available Options ----------------- -=================================== ============ ================================== -Option Default Function -=================================== ============ ================================== -display.chop_threshold None If set to a float value, all float - values smaller then the given - threshold will be displayed as - exactly 0 by repr and friends. -display.colheader_justify right Controls the justification of - column headers. used by DataFrameFormatter. -display.column_space 12 No description available. -display.date_dayfirst False When True, prints and parses dates - with the day first, eg 20/01/2005 -display.date_yearfirst False When True, prints and parses dates - with the year first, eg 2005/01/20 -display.encoding UTF-8 Defaults to the detected encoding - of the console. Specifies the encoding - to be used for strings returned by - to_string, these are generally strings - meant to be displayed on the console. -display.expand_frame_repr True Whether to print out the full DataFrame - repr for wide DataFrames across - multiple lines, `max_columns` is - still respected, but the output will - wrap-around across multiple "pages" - if its width exceeds `display.width`. -display.float_format None The callable should accept a floating - point number and return a string with - the desired format of the number. - This is used in some places like - SeriesFormatter. - See core.format.EngFormatter for an example. -display.large_repr truncate For DataFrames exceeding max_rows/max_cols, - the repr (and HTML repr) can show - a truncated table (the default), - or switch to the view from df.info() - (the behaviour in earlier versions of pandas). - allowable settings, ['truncate', 'info'] -display.latex.repr False Whether to produce a latex DataFrame - representation for jupyter frontends - that support it. -display.latex.escape True Escapes special characters in DataFrames, when - using the to_latex method. 
-display.latex.longtable False Specifies if the to_latex method of a DataFrame - uses the longtable format. -display.latex.multicolumn True Combines columns when using a MultiIndex -display.latex.multicolumn_format 'l' Alignment of multicolumn labels -display.latex.multirow False Combines rows when using a MultiIndex. - Centered instead of top-aligned, - separated by clines. -display.max_columns 20 max_rows and max_columns are used - in __repr__() methods to decide if - to_string() or info() is used to - render an object to a string. In - case python/IPython is running in - a terminal this can be set to 0 and - pandas will correctly auto-detect - the width the terminal and swap to - a smaller format in case all columns - would not fit vertically. The IPython - notebook, IPython qtconsole, or IDLE - do not run in a terminal and hence - it is not possible to do correct - auto-detection. 'None' value means - unlimited. -display.max_colwidth 50 The maximum width in characters of - a column in the repr of a pandas - data structure. When the column overflows, - a "..." placeholder is embedded in - the output. -display.max_info_columns 100 max_info_columns is used in DataFrame.info - method to decide if per column information - will be printed. -display.max_info_rows 1690785 df.info() will usually show null-counts - for each column. For large frames - this can be quite slow. max_info_rows - and max_info_cols limit this null - check only to frames with smaller - dimensions then specified. -display.max_rows 60 This sets the maximum number of rows - pandas should output when printing - out various output. For example, - this value determines whether the - repr() for a dataframe prints out - fully or just a summary repr. - 'None' value means unlimited. -display.max_seq_items 100 when pretty-printing a long sequence, - no more then `max_seq_items` will - be printed. If items are omitted, - they will be denoted by the addition - of "..." to the resulting string. 
- If set to None, the number of items - to be printed is unlimited. -display.memory_usage True This specifies if the memory usage of - a DataFrame should be displayed when the - df.info() method is invoked. -display.multi_sparse True "Sparsify" MultiIndex display (don't - display repeated elements in outer - levels within groups) -display.notebook_repr_html True When True, IPython notebook will - use html representation for - pandas objects (if it is available). -display.pprint_nest_depth 3 Controls the number of nested levels - to process when pretty-printing -display.precision 6 Floating point output precision in - terms of number of places after the - decimal, for regular formatting as well - as scientific notation. Similar to - numpy's ``precision`` print option -display.show_dimensions truncate Whether to print out dimensions - at the end of DataFrame repr. - If 'truncate' is specified, only - print out the dimensions if the - frame is truncated (e.g. not display - all rows and/or columns) -display.width 80 Width of the display in characters. - In case python/IPython is running in - a terminal this can be set to None - and pandas will correctly auto-detect - the width. Note that the IPython notebook, - IPython qtconsole, or IDLE do not run in a - terminal and hence it is not possible - to correctly detect the width. -display.html.table_schema False Whether to publish a Table Schema - representation for frontends that - support it. -display.html.border 1 A ``border=value`` attribute is - inserted in the ``<table>`` tag - for the DataFrame HTML repr. -io.excel.xls.writer xlwt The default Excel writer engine for - 'xls' files. -io.excel.xlsm.writer openpyxl The default Excel writer engine for - 'xlsm' files. Available options: - 'openpyxl' (the default). -io.excel.xlsx.writer openpyxl The default Excel writer engine for - 'xlsx' files. 
-io.hdf.default_format None default format writing format, if - None, then put will default to - 'fixed' and append will default to - 'table' -io.hdf.dropna_table True drop ALL nan rows when appending - to a table -io.parquet.engine None The engine to use as a default for - parquet reading and writing. If None - then try 'pyarrow' and 'fastparquet' -mode.chained_assignment warn Raise an exception, warn, or no - action if trying to use chained - assignment, The default is warn -mode.sim_interactive False Whether to simulate interactive mode - for purposes of testing. -mode.use_inf_as_na False True means treat None, NaN, -INF, - INF as NA (old way), False means - None and NaN are null, but INF, -INF - are not NA (new way). -compute.use_bottleneck True Use the bottleneck library to accelerate - computation if it is installed. -compute.use_numexpr True Use the numexpr library to accelerate - computation if it is installed. -=================================== ============ ================================== +======================================= ============ ================================== +Option Default Function +======================================= ============ ================================== +display.chop_threshold None If set to a float value, all float + values smaller then the given + threshold will be displayed as + exactly 0 by repr and friends. +display.colheader_justify right Controls the justification of + column headers. used by DataFrameFormatter. +display.column_space 12 No description available. +display.date_dayfirst False When True, prints and parses dates + with the day first, eg 20/01/2005 +display.date_yearfirst False When True, prints and parses dates + with the year first, eg 2005/01/20 +display.encoding UTF-8 Defaults to the detected encoding + of the console. Specifies the encoding + to be used for strings returned by + to_string, these are generally strings + meant to be displayed on the console. 
+display.expand_frame_repr True Whether to print out the full DataFrame + repr for wide DataFrames across + multiple lines, `max_columns` is + still respected, but the output will + wrap-around across multiple "pages" + if its width exceeds `display.width`. +display.float_format None The callable should accept a floating + point number and return a string with + the desired format of the number. + This is used in some places like + SeriesFormatter. + See core.format.EngFormatter for an example. +display.large_repr truncate For DataFrames exceeding max_rows/max_cols, + the repr (and HTML repr) can show + a truncated table (the default), + or switch to the view from df.info() + (the behaviour in earlier versions of pandas). + allowable settings, ['truncate', 'info'] +display.latex.repr False Whether to produce a latex DataFrame + representation for jupyter frontends + that support it. +display.latex.escape True Escapes special characters in DataFrames, when + using the to_latex method. +display.latex.longtable False Specifies if the to_latex method of a DataFrame + uses the longtable format. +display.latex.multicolumn True Combines columns when using a MultiIndex +display.latex.multicolumn_format 'l' Alignment of multicolumn labels +display.latex.multirow False Combines rows when using a MultiIndex. + Centered instead of top-aligned, + separated by clines. +display.max_columns 20 max_rows and max_columns are used + in __repr__() methods to decide if + to_string() or info() is used to + render an object to a string. In + case python/IPython is running in + a terminal this can be set to 0 and + pandas will correctly auto-detect + the width the terminal and swap to + a smaller format in case all columns + would not fit vertically. The IPython + notebook, IPython qtconsole, or IDLE + do not run in a terminal and hence + it is not possible to do correct + auto-detection. 'None' value means + unlimited. 
+display.max_colwidth 50 The maximum width in characters of + a column in the repr of a pandas + data structure. When the column overflows, + a "..." placeholder is embedded in + the output. +display.max_info_columns 100 max_info_columns is used in DataFrame.info + method to decide if per column information + will be printed. +display.max_info_rows 1690785 df.info() will usually show null-counts + for each column. For large frames + this can be quite slow. max_info_rows + and max_info_cols limit this null + check only to frames with smaller + dimensions then specified. +display.max_rows 60 This sets the maximum number of rows + pandas should output when printing + out various output. For example, + this value determines whether the + repr() for a dataframe prints out + fully or just a summary repr. + 'None' value means unlimited. +display.max_seq_items 100 when pretty-printing a long sequence, + no more then `max_seq_items` will + be printed. If items are omitted, + they will be denoted by the addition + of "..." to the resulting string. + If set to None, the number of items + to be printed is unlimited. +display.memory_usage True This specifies if the memory usage of + a DataFrame should be displayed when the + df.info() method is invoked. +display.multi_sparse True "Sparsify" MultiIndex display (don't + display repeated elements in outer + levels within groups) +display.notebook_repr_html True When True, IPython notebook will + use html representation for + pandas objects (if it is available). +display.pprint_nest_depth 3 Controls the number of nested levels + to process when pretty-printing +display.precision 6 Floating point output precision in + terms of number of places after the + decimal, for regular formatting as well + as scientific notation. Similar to + numpy's ``precision`` print option +display.show_dimensions truncate Whether to print out dimensions + at the end of DataFrame repr. 
+ If 'truncate' is specified, only + print out the dimensions if the + frame is truncated (e.g. not display + all rows and/or columns) +display.width 80 Width of the display in characters. + In case python/IPython is running in + a terminal this can be set to None + and pandas will correctly auto-detect + the width. Note that the IPython notebook, + IPython qtconsole, or IDLE do not run in a + terminal and hence it is not possible + to correctly detect the width. +display.html.table_schema False Whether to publish a Table Schema + representation for frontends that + support it. +display.html.border 1 A ``border=value`` attribute is + inserted in the ``<table>`` tag + for the DataFrame HTML repr. +io.excel.xls.writer xlwt The default Excel writer engine for + 'xls' files. +io.excel.xlsm.writer openpyxl The default Excel writer engine for + 'xlsm' files. Available options: + 'openpyxl' (the default). +io.excel.xlsx.writer openpyxl The default Excel writer engine for + 'xlsx' files. +io.hdf.default_format None default format writing format, if + None, then put will default to + 'fixed' and append will default to + 'table' +io.hdf.dropna_table True drop ALL nan rows when appending + to a table +io.parquet.engine None The engine to use as a default for + parquet reading and writing. If None + then try 'pyarrow' and 'fastparquet' +mode.chained_assignment warn Controls ``SettingWithCopyWarning``: + 'raise', 'warn', or None. Raise an + exception, warn, or no action if + trying to use :ref:`chained assignment <indexing.evaluation_order>`. +mode.sim_interactive False Whether to simulate interactive mode + for purposes of testing. +mode.use_inf_as_na False True means treat None, NaN, -INF, + INF as NA (old way), False means + None and NaN are null, but INF, -INF + are not NA (new way). +compute.use_bottleneck True Use the bottleneck library to accelerate + computation if it is installed. 
+compute.use_numexpr True Use the numexpr library to accelerate + computation if it is installed. +plotting.matplotlib.register_converters True Register custom converters with + matplotlib. Set to False to de-register. +======================================= ============ ================================== .. _basics.console_output: diff --git a/doc/source/release.rst b/doc/source/release.rst index 6c3e7f847b485..aea6280a490d6 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -37,6 +37,103 @@ analysis / manipulation tool available in any language. * Binary installers on PyPI: http://pypi.python.org/pypi/pandas * Documentation: http://pandas.pydata.org +pandas 0.22.0 +------------- + +**Release date:** December 29, 2017 + +This is a major release from 0.21.1 and includes a single, API-breaking change. +We recommend that all users upgrade to this version after carefully reading the +release note. + +The only changes are: + +- The sum of an empty or all-*NA* ``Series`` is now ``0`` +- The product of an empty or all-*NA* ``Series`` is now ``1`` +- We've added a ``min_count`` parameter to ``.sum()`` and ``.prod()`` controlling + the minimum number of valid values for the result to be valid. If fewer than + ``min_count`` non-*NA* values are present, the result is *NA*. The default is + ``0``. To return ``NaN``, the 0.21 behavior, use ``min_count=1``. + +See the :ref:`v0.22.0 Whatsnew <whatsnew_0220>` overview for further explanation +of all the places in the library this affects. + +pandas 0.21.1 +------------- + +**Release date:** December 12, 2017 + +This is a minor bug-fix release in the 0.21.x series and includes some small +regression fixes, bug fixes and performance improvements. We recommend that all +users upgrade to this version. + +Highlights include: + +- Temporarily restore matplotlib datetime plotting functionality. This should + resolve issues for users who relied implicitly on pandas to plot datetimes + with matplotlib. 
See :ref:`here <whatsnew_0211.special>`. +- Improvements to the Parquet IO functions introduced in 0.21.0. See + :ref:`here <whatsnew_0211.enhancements.parquet>`. + +See the :ref:`v0.21.1 Whatsnew <whatsnew_0211>` overview for an extensive list +of all the changes for 0.21.1. + +Thanks +~~~~~~ + +A total of 46 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +Contributors +============ + +* Aaron Critchley + +* Alex Rychyk +* Alexander Buchkovsky + +* Alexander Michael Schade + +* Chris Mazzullo +* Cornelius Riemenschneider + +* Dave Hirschfeld + +* David Fischer + +* David Stansby + +* Dror Atariah + +* Eric Kisslinger + +* Hans + +* Ingolf Becker + +* Jan Werkmann + +* Jeff Reback +* Joris Van den Bossche +* Jörg Döpfert + +* Kevin Kuhl + +* Krzysztof Chomski + +* Leif Walsh +* Licht Takeuchi +* Manraj Singh + +* Matt Braymer-Hayes + +* Michael Waskom + +* Mie~~~ + +* Peter Hoffmann + +* Robert Meyer + +* Sam Cohan + +* Sietse Brouwer + +* Sven + +* Tim Swast +* Tom Augspurger +* Wes Turner +* William Ayd + +* Yee Mey + +* bolkedebruin + +* cgohlke +* derestle-htwg + +* fjdiod + +* gabrielclow + +* gfyoung +* ghasemnaddaf + +* jbrockmendel +* jschendel +* miker985 + +* topper-123 + pandas 0.21.0 ------------- @@ -52,7 +149,7 @@ Highlights include: - Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` function and :meth:`DataFrame.to_parquet` method, see :ref:`here <whatsnew_0210.enhancements.parquet>`. - New user-facing :class:`pandas.api.types.CategoricalDtype` for specifying categoricals independent of the data, see :ref:`here <whatsnew_0210.enhancements.categorical_dtype>`. -- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. 
+- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and ``sum`` and ``prod`` on empty Series now return NaN instead of 0, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. - Compatibility fixes for pypy, see :ref:`here <whatsnew_0210.pypy>`. - Additions to the ``drop``, ``reindex`` and ``rename`` API to make them more consistent, see :ref:`here <whatsnew_0210.enhancements.drop_api>`. - Addition of the new methods ``DataFrame.infer_objects`` (see :ref:`here <whatsnew_0210.enhancements.infer_objects>`) and ``GroupBy.pipe`` (see :ref:`here <whatsnew_0210.enhancements.GroupBy_pipe>`). diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index 3385bafc26467..64cbe0b050a61 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -18,6 +18,10 @@ What's New These are new features and improvements of note in each release. +.. include:: whatsnew/v0.22.0.txt + +.. include:: whatsnew/v0.21.1.txt + .. include:: whatsnew/v0.21.0.txt .. include:: whatsnew/v0.20.3.txt diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 4c460eeb85b82..3e673bd4cbc28 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -12,7 +12,7 @@ Highlights include: - Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` function and :meth:`DataFrame.to_parquet` method, see :ref:`here <whatsnew_0210.enhancements.parquet>`. - New user-facing :class:`pandas.api.types.CategoricalDtype` for specifying categoricals independent of the data, see :ref:`here <whatsnew_0210.enhancements.categorical_dtype>`. 
-- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. +- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and ``sum`` and ``prod`` on empty Series now return NaN instead of 0, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. - Compatibility fixes for pypy, see :ref:`here <whatsnew_0210.pypy>`. - Additions to the ``drop``, ``reindex`` and ``rename`` API to make them more consistent, see :ref:`here <whatsnew_0210.enhancements.drop_api>`. - Addition of the new methods ``DataFrame.infer_objects`` (see :ref:`here <whatsnew_0210.enhancements.infer_objects>`) and ``GroupBy.pipe`` (see :ref:`here <whatsnew_0210.enhancements.GroupBy_pipe>`). @@ -369,11 +369,17 @@ Additionally, support has been dropped for Python 3.4 (:issue:`15251`). .. _whatsnew_0210.api_breaking.bottleneck: -Sum/Prod of all-NaN Series/DataFrames is now consistently NaN -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Sum/Prod of all-NaN or empty Series/DataFrames is now consistently NaN +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. note:: + + The changes described here have been partially reverted. See + the :ref:`v0.22.0 Whatsnew <whatsnew_0220>` for more. + The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames no longer depends on -whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed. (:issue:`9422`, :issue:`15507`). +whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and return value of ``sum`` and ``prod`` on an empty Series has changed (:issue:`9422`, :issue:`15507`). 
Calling ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of a ``DataFrame``, will result in ``NaN``. See the :ref:`docs <missing_data.numeric_sum>`. @@ -381,35 +387,35 @@ Calling ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of s = Series([np.nan]) -Previously NO ``bottleneck`` +Previously WITHOUT ``bottleneck`` installed: .. code-block:: ipython In [2]: s.sum() Out[2]: np.nan -Previously WITH ``bottleneck`` +Previously WITH ``bottleneck``: .. code-block:: ipython In [2]: s.sum() Out[2]: 0.0 -New Behavior, without regard to the bottleneck installation. +New Behavior, without regard to the bottleneck installation: .. ipython:: python s.sum() -Note that this also changes the sum of an empty ``Series`` - -Previously regardless of ``bottlenck`` +Note that this also changes the sum of an empty ``Series``. Previously this always returned 0 regardless of a ``bottleneck`` installation: .. code-block:: ipython In [1]: pd.Series([]).sum() Out[1]: 0 +but for consistency with the all-NaN case, this was changed to return NaN as well: + .. ipython:: python pd.Series([]).sum() @@ -877,6 +883,28 @@ New Behavior: pd.interval_range(start=0, end=4) +.. _whatsnew_0210.api.mpl_converters: + +No Automatic Matplotlib Converters +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pandas no longer registers our ``date``, ``time``, ``datetime``, +``datetime64``, and ``Period`` converters with matplotlib when pandas is +imported. Matplotlib plot methods (``plt.plot``, ``ax.plot``, ...), will not +nicely format the x-axis for ``DatetimeIndex`` or ``PeriodIndex`` values. You +must explicitly register these methods: + +.. ipython:: python + + from pandas.tseries import converter + converter.register() + + fig, ax = plt.subplots() + plt.plot(pd.date_range('2017', periods=6), range(6)) + +Pandas built-in ``Series.plot`` and ``DataFrame.plot`` *will* register these +converters on first-use (:issue:`17710`). + ..
_whatsnew_0210.api: Other API Changes @@ -900,8 +928,6 @@ Other API Changes - Renamed non-functional ``index`` to ``index_col`` in :func:`read_stata` to improve API consistency (:issue:`16342`) - Bug in :func:`DataFrame.drop` caused boolean labels ``False`` and ``True`` to be treated as labels 0 and 1 respectively when dropping indices from a numeric index. This will now raise a ValueError (:issue:`16877`) - Restricted DateOffset keyword arguments. Previously, ``DateOffset`` subclasses allowed arbitrary keyword arguments which could lead to unexpected behavior. Now, only valid arguments will be accepted. (:issue:`17176`). -- Pandas no longer registers matplotlib converters on import. The converters - will be registered and used when the first plot is draw (:issue:`17710`) .. _whatsnew_0210.deprecations: diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 422a239e86ece..9d065d71a4801 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -1,56 +1,94 @@ .. _whatsnew_0211: -v0.21.1 -------- +v0.21.1 (December 12, 2017) +--------------------------- -This is a minor release from 0.21.1 and includes a number of deprecations, new -features, enhancements, and performance improvements along with a large number -of bug fixes. We recommend that all users upgrade to this version. +This is a minor bug-fix release in the 0.21.x series and includes some small regression fixes, +bug fixes and performance improvements. +We recommend that all users upgrade to this version. + +Highlights include: + +- Temporarily restore matplotlib datetime plotting functionality. This should + resolve issues for users who implicitly relied on pandas to plot datetimes + with matplotlib. See :ref:`here <whatsnew_0211.converters>`. +- Improvements to the Parquet IO functions introduced in 0.21.0. See + :ref:`here <whatsnew_0211.enhancements.parquet>`. + + +.. contents:: What's new in v0.21.1 + :local: + :backlinks: none + + +.. 
_whatsnew_0211.converters: + +Restore Matplotlib datetime Converter Registration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Pandas implements some matplotlib converters for nicely formatting the axis +labels on plots with ``datetime`` or ``Period`` values. Prior to pandas 0.21.0, +these were implicitly registered with matplotlib, as a side effect of ``import +pandas``. + +In pandas 0.21.0, we required users to explicitly register the +converter. This caused problems for some users who relied on those converters +being present for regular ``matplotlib.pyplot`` plotting methods, so we're +temporarily reverting that change; pandas 0.21.1 again registers the converters on +import, just like before 0.21.0. + +We've added a new option to control the converters: +``pd.options.plotting.matplotlib.register_converters``. By default, they are +registered. Toggling this to ``False`` removes pandas' formatters and restore +any converters we overwrote when registering them (:issue:`18301`). + +We're working with the matplotlib developers to make this easier. We're trying +to balance user convenience (automatically registering the converters) with +import performance and best practices (importing pandas shouldn't have the side +effect of overwriting any custom converters you've already set). In the future +we hope to have most of the datetime formatting functionality in matplotlib, +with just the pandas-specific converters in pandas. We'll then gracefully +deprecate the automatic registration of converters in favor of users explicitly +registering them when they want them. .. _whatsnew_0211.enhancements: New features ~~~~~~~~~~~~ -- -- -- +.. _whatsnew_0211.enhancements.parquet: + +Improvements to the Parquet IO functionality +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- :func:`DataFrame.to_parquet` will now write non-default indexes when the + underlying engine supports it. 
The indexes will be preserved when reading + back in with :func:`read_parquet` (:issue:`18581`). +- :func:`read_parquet` now allows specifying the columns to read from a parquet file (:issue:`18154`) +- :func:`read_parquet` now allows specifying kwargs which are passed to the respective engine (:issue:`18216`) .. _whatsnew_0211.enhancements.other: Other Enhancements ^^^^^^^^^^^^^^^^^^ -- -- -- +- :meth:`Timestamp.timestamp` is now available in Python 2.7. (:issue:`17329`) +- :class:`Grouper` and :class:`TimeGrouper` now have a friendly repr output (:issue:`18203`). .. _whatsnew_0211.deprecations: Deprecations ~~~~~~~~~~~~ -- -- -- +- ``pandas.tseries.register`` has been renamed to + :func:`pandas.plotting.register_matplotlib_converters` (:issue:`18301`) .. _whatsnew_0211.performance: Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- -- -- - -.. _whatsnew_0211.docs: - -Documentation Changes -~~~~~~~~~~~~~~~~~~~~~ - -- -- -- +- Improved performance of plotting large series/dataframes (:issue:`18236`). .. _whatsnew_0211.bug_fixes: Bug Fixes @@ -60,65 +98,78 @@ Bug Fixes Conversion ^^^^^^^^^^ -- -- -- +- Bug in :class:`TimedeltaIndex` subtraction could incorrectly overflow when ``NaT`` is present (:issue:`17791`) +- Bug in :class:`DatetimeIndex` subtracting datetimelike from DatetimeIndex could fail to overflow (:issue:`18020`) +- Bug in :meth:`IntervalIndex.copy` when copying an ``IntervalIndex`` with non-default ``closed`` (:issue:`18339`) +- Bug in :func:`DataFrame.to_dict` where columns of datetime that are tz-aware were not converted to required arrays when used with ``orient='records'``, raising ``TypeError`` (:issue:`18372`) +- Bug in :class:`DateTimeIndex` and :meth:`date_range` where mismatching tz-aware ``start`` and ``end`` timezones would not raise an error if ``end.tzinfo`` is None (:issue:`18431`) +- Bug in :meth:`Series.fillna` which raised when passed a long integer on Python 2 (:issue:`18159`).
Indexing ^^^^^^^^ -- -- -- +- Bug in a boolean comparison of a ``datetime.datetime`` and a ``datetime64[ns]`` dtype Series (:issue:`17965`) +- Bug where a ``MultiIndex`` with more than a million records was not raising ``AttributeError`` when trying to access a missing attribute (:issue:`18165`) +- Bug in :class:`IntervalIndex` constructor when a list of intervals is passed with non-default ``closed`` (:issue:`18334`) +- Bug in ``Index.putmask`` when an invalid mask passed (:issue:`18368`) +- Bug in masked assignment of a ``timedelta64[ns]`` dtype ``Series``, incorrectly coerced to float (:issue:`18493`) I/O ^^^ +- Bug in class:`~pandas.io.stata.StataReader` not converting date/time columns with display formatting addressed (:issue:`17990`). Previously columns with display formatting were normally left as ordinal numbers and not converted to datetime objects. +- Bug in :func:`read_csv` when reading a compressed UTF-16 encoded file (:issue:`18071`) +- Bug in :func:`read_csv` for handling null values in index columns when specifying ``na_filter=False`` (:issue:`5239`) +- Bug in :func:`read_csv` when reading numeric category fields with high cardinality (:issue:`18186`) +- Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) +- Bug in parsing integer datetime-like columns with specified format in ``read_sql`` (:issue:`17855`). 
+- Bug in :meth:`DataFrame.to_msgpack` when serializing data of the numpy.bool_ datatype (:issue:`18390`) +- Bug in :func:`read_json` not decoding when reading line delimited JSON from S3 (:issue:`17200`) +- Bug in :func:`pandas.io.json.json_normalize` to avoid modification of ``meta`` (:issue:`18610`) +- Bug in :func:`to_latex` where repeated multi-index values were not printed even though a higher level index differed from the previous row (:issue:`14484`) +- Bug when reading NaN-only categorical columns in :class:`HDFStore` (:issue:`18413`) +- Bug in :meth:`DataFrame.to_latex` with ``longtable=True`` where a latex multicolumn always spanned over three columns (:issue:`17959`) + + Plotting ^^^^^^^^ -- -- -- +- Bug in ``DataFrame.plot()`` and ``Series.plot()`` with :class:`DatetimeIndex` where a figure generated by them is not pickleable in Python 3 (:issue:`18439`) Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- -- -- - -Sparse -^^^^^^ - -- -- -- +- Bug in ``DataFrame.resample(...).apply(...)`` when there is a callable that returns different columns (:issue:`15169`) +- Bug in ``DataFrame.resample(...)`` when there is a time change (DST) and resampling frequency is 12h or higher (:issue:`15549`) +- Bug in ``pd.DataFrameGroupBy.count()`` when counting over a datetimelike column (:issue:`13393`) +- Bug in ``rolling.var`` where calculation is inaccurate with a zero-valued array (:issue:`18430`) Reshaping ^^^^^^^^^ -- -- -- +- Error message in ``pd.merge_asof()`` for key datatype mismatch now includes datatype of left and right key (:issue:`18068`) +- Bug in ``pd.concat`` when empty and non-empty DataFrames or Series are concatenated (:issue:`18178` :issue:`18187`) +- Bug in ``DataFrame.filter(...)`` when :class:`unicode` is passed as a condition in Python 2 (:issue:`13101`) +- Bug when merging empty DataFrames when ``np.seterr(divide='raise')`` is set (:issue:`17776`) Numeric ^^^^^^^ -- -- -- +- Bug in ``pd.Series.rolling.skew()`` and ``rolling.kurt()`` with all
equal values has floating issue (:issue:`18044`) +- Bug in :class:`TimedeltaIndex` subtraction could incorrectly overflow when ``NaT`` is present (:issue:`17791`) +- Bug in :class:`DatetimeIndex` subtracting datetimelike from DatetimeIndex could fail to overflow (:issue:`18020`) Categorical ^^^^^^^^^^^ -- -- -- +- Bug in :meth:`DataFrame.astype` where casting to 'category' on an empty ``DataFrame`` causes a segmentation fault (:issue:`18004`) +- Error messages in the testing module have been improved when items have + different ``CategoricalDtype`` (:issue:`18069`) +- ``CategoricalIndex`` can now correctly take a ``pd.api.types.CategoricalDtype`` as its dtype (:issue:`18116`) +- Bug in ``Categorical.unique()`` returning read-only ``codes`` array when all categories were ``NaN`` (:issue:`18051`) +- Bug in ``DataFrame.groupby(axis=1)`` with a ``CategoricalIndex`` (:issue:`18432`) -Other -^^^^^ +String +^^^^^^ -- -- -- +- :meth:`Series.str.split()` will now propagate ``NaN`` values across all expanded columns instead of ``None`` (:issue:`18450`) diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 53b052a955b45..d165339cb0de9 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -1,156 +1,243 @@ .. _whatsnew_0220: -v0.22.0 -------- +v0.22.0 (December 29, 2017) +--------------------------- -This is a major release from 0.21.1 and includes a number of API changes, -deprecations, new features, enhancements, and performance improvements along -with a large number of bug fixes. We recommend that all users upgrade to this -version. +This is a major release from 0.21.1 and includes a single, API-breaking change. +We recommend that all users upgrade to this version after carefully reading the +release note (singular!). -.. _whatsnew_0220.enhancements: +..
_whatsnew_0220.api_breaking: -New features -~~~~~~~~~~~~ +Backwards incompatible API changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- -- +Pandas 0.22.0 changes the handling of empty and all-*NA* sums and products. The +summary is that -.. _whatsnew_0220.enhancements.other: +* The sum of an empty or all-*NA* ``Series`` is now ``0`` +* The product of an empty or all-*NA* ``Series`` is now ``1`` +* We've added a ``min_count`` parameter to ``.sum()`` and ``.prod()`` controlling + the minimum number of valid values for the result to be valid. If fewer than + ``min_count`` non-*NA* values are present, the result is *NA*. The default is + ``0``. To return ``NaN``, the 0.21 behavior, use ``min_count=1``. -Other Enhancements -^^^^^^^^^^^^^^^^^^ +Some background: In pandas 0.21, we fixed a long-standing inconsistency +in the return value of all-*NA* series depending on whether or not bottleneck +was installed. See :ref:`whatsnew_0210.api_breaking.bottleneck`. At the same +time, we changed the sum and prod of an empty ``Series`` to also be ``NaN``. -- -- -- +Based on feedback, we've partially reverted those changes. -.. _whatsnew_0220.api_breaking: +Arithmetic Operations +^^^^^^^^^^^^^^^^^^^^^ -Backwards incompatible API changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The default sum for empty or all-*NA* ``Series`` is now ``0``. -- -- -- +*pandas 0.21.x* -.. _whatsnew_0220.api: +.. code-block:: ipython -Other API Changes -^^^^^^^^^^^^^^^^^ + In [1]: pd.Series([]).sum() + Out[1]: nan -- -- -- + In [2]: pd.Series([np.nan]).sum() + Out[2]: nan -.. _whatsnew_0220.deprecations: +*pandas 0.22.0* -Deprecations -~~~~~~~~~~~~ +.. ipython:: python -- -- -- + pd.Series([]).sum() + pd.Series([np.nan]).sum() -.. _whatsnew_0220.prior_deprecations: +The default behavior is the same as pandas 0.20.3 with bottleneck installed. It +also matches the behavior of NumPy's ``np.nansum`` on empty and all-*NA* arrays. 
-Removal of prior version deprecations/changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +To have the sum of an empty series return ``NaN`` (the default behavior of +pandas 0.20.3 without bottleneck, or pandas 0.21.x), use the ``min_count`` +keyword. -- -- -- +.. ipython:: python -.. _whatsnew_0220.performance: + pd.Series([]).sum(min_count=1) -Performance Improvements -~~~~~~~~~~~~~~~~~~~~~~~~ +Thanks to the ``skipna`` parameter, the ``.sum`` on an all-*NA* +series is conceptually the same as the ``.sum`` of an empty one with +``skipna=True`` (the default). -- -- -- +.. ipython:: python -.. _whatsnew_0220.docs: + pd.Series([np.nan]).sum(min_count=1) # skipna=True by default -Documentation Changes -~~~~~~~~~~~~~~~~~~~~~ +The ``min_count`` parameter refers to the minimum number of *non-null* values +required for a non-NA sum or product. -- -- -- +:meth:`Series.prod` has been updated to behave the same as :meth:`Series.sum`, +returning ``1`` instead. -.. _whatsnew_0220.bug_fixes: +.. ipython:: python -Bug Fixes -~~~~~~~~~ + pd.Series([]).prod() + pd.Series([np.nan]).prod() + pd.Series([]).prod(min_count=1) -Conversion -^^^^^^^^^^ +These changes affect :meth:`DataFrame.sum` and :meth:`DataFrame.prod` as well. +Finally, a few less obvious places in pandas are affected by this change. -- -- -- +Grouping by a Categorical +^^^^^^^^^^^^^^^^^^^^^^^^^ -Indexing -^^^^^^^^ +Grouping by a ``Categorical`` and summing now returns ``0`` instead of +``NaN`` for categories with no observations. The product now returns ``1`` +instead of ``NaN``. + +*pandas 0.21.x* + +.. code-block:: ipython + + In [8]: grouper = pd.Categorical(['a', 'a'], categories=['a', 'b']) + + In [9]: pd.Series([1, 2]).groupby(grouper).sum() + Out[9]: + a 3.0 + b NaN + dtype: float64 -- -- -- +*pandas 0.22* -I/O -^^^ +.. 
ipython:: python -- -- -- + grouper = pd.Categorical(['a', 'a'], categories=['a', 'b']) + pd.Series([1, 2]).groupby(grouper).sum() -Plotting +To restore the 0.21 behavior of returning ``NaN`` for unobserved groups, +use ``min_count>=1``. + +.. ipython:: python + + pd.Series([1, 2]).groupby(grouper).sum(min_count=1) + +Resample ^^^^^^^^ -- -- -- +The sum and product of all-*NA* bins has changed from ``NaN`` to ``0`` for +sum and ``1`` for product. + +*pandas 0.21.x* + +.. code-block:: ipython + + In [11]: s = pd.Series([1, 1, np.nan, np.nan], + ...: index=pd.date_range('2017', periods=4)) + ...: s + Out[11]: + 2017-01-01 1.0 + 2017-01-02 1.0 + 2017-01-03 NaN + 2017-01-04 NaN + Freq: D, dtype: float64 + + In [12]: s.resample('2d').sum() + Out[12]: + 2017-01-01 2.0 + 2017-01-03 NaN + Freq: 2D, dtype: float64 + +*pandas 0.22.0* + +.. ipython:: python + + s = pd.Series([1, 1, np.nan, np.nan], + index=pd.date_range('2017', periods=4)) + s.resample('2d').sum() + +To restore the 0.21 behavior of returning ``NaN``, use ``min_count>=1``. + +.. ipython:: python + + s.resample('2d').sum(min_count=1) + +In particular, upsampling and taking the sum or product is affected, as +upsampling introduces missing values even if the original series was +entirely valid. + +*pandas 0.21.x* + +.. code-block:: ipython + + In [14]: idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02']) + + In [15]: pd.Series([1, 2], index=idx).resample('12H').sum() + Out[15]: + 2017-01-01 00:00:00 1.0 + 2017-01-01 12:00:00 NaN + 2017-01-02 00:00:00 2.0 + Freq: 12H, dtype: float64 + +*pandas 0.22.0* + +.. ipython:: python + + idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02']) + pd.Series([1, 2], index=idx).resample("12H").sum() + +Once again, the ``min_count`` keyword is available to restore the 0.21 behavior. + +.. 
ipython:: python + + pd.Series([1, 2], index=idx).resample("12H").sum(min_count=1) + +Rolling and Expanding +^^^^^^^^^^^^^^^^^^^^^ + +Rolling and expanding already have a ``min_periods`` keyword that behaves +similar to ``min_count``. The only case that changes is when doing a rolling +or expanding sum with ``min_periods=0``. Previously this returned ``NaN``, +when fewer than ``min_periods`` non-*NA* values were in the window. Now it +returns ``0``. + +*pandas 0.21.1* + +.. code-block:: ipython + + In [17]: s = pd.Series([np.nan, np.nan]) + + In [18]: s.rolling(2, min_periods=0).sum() + Out[18]: + 0 NaN + 1 NaN + dtype: float64 -Groupby/Resample/Rolling -^^^^^^^^^^^^^^^^^^^^^^^^ +*pandas 0.22.0* -- -- -- +.. ipython:: python -Sparse -^^^^^^ + s = pd.Series([np.nan, np.nan]) + s.rolling(2, min_periods=0).sum() -- -- -- +The default behavior of ``min_periods=None``, implying that ``min_periods`` +equals the window size, is unchanged. -Reshaping -^^^^^^^^^ +Compatibility +~~~~~~~~~~~~~ -- -- -- +If you maintain a library that should work across pandas versions, it +may be easiest to exclude pandas 0.21 from your requirements. Otherwise, all your +``sum()`` calls would need to check if the ``Series`` is empty before summing. -Numeric -^^^^^^^ +With setuptools, in your ``setup.py`` use:: -- -- -- + install_requires=['pandas!=0.21.*', ...] -Categorical -^^^^^^^^^^^ +With conda, use -- -- -- +.. code-block:: yaml -Other -^^^^^ + requirements: + run: + - pandas !=0.21.0,!=0.21.1 -- -- -- +Note that the inconsistency in the return value for all-*NA* series is still +there for pandas 0.20.3 and earlier. Avoiding pandas 0.21 will only help with +the empty case. 
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index d159761c3f5e6..a44a7288bda45 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -87,7 +87,7 @@ class NegInfinity(object): @cython.boundscheck(False) def is_lexsorted(list list_of_arrays): cdef: - int i + Py_ssize_t i Py_ssize_t n, nlevels int64_t k, cur, pre ndarray arr @@ -99,11 +99,12 @@ def is_lexsorted(list list_of_arrays): cdef int64_t **vecs = <int64_t**> malloc(nlevels * sizeof(int64_t*)) for i in range(nlevels): arr = list_of_arrays[i] + assert arr.dtype.name == 'int64' vecs[i] = <int64_t*> arr.data # Assume uniqueness?? with nogil: - for i in range(n): + for i in range(1, n): for k in range(nlevels): cur = vecs[k][i] pre = vecs[k][i -1] diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index d38b677df321c..14d47398ac1df 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -36,7 +36,8 @@ def get_dispatch(dtypes): def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=0): """ Only aggregates on axis=0 """ @@ -88,7 +89,7 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for i in range(ncounts): for j in range(K): - if nobs[i, j] == 0: + if nobs[i, j] < min_count: out[i, j] = NAN else: out[i, j] = sumx[i, j] @@ -99,7 +100,8 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=0): """ Only aggregates on axis=0 """ @@ -147,7 +149,7 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for i in range(ncounts): for j in range(K): - if nobs[i, j] == 0: + if nobs[i, j] < min_count: out[i, j] = NAN else: out[i, j] = prodx[i, j] @@ -159,12 
+161,15 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) {{dest_type2}} val, ct, oldmean ndarray[{{dest_type2}}, ndim=2] nobs, mean + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -208,12 +213,15 @@ def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] sumx, nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -263,7 +271,8 @@ def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -272,6 +281,8 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count Py_ssize_t ngroups = len(counts) + assert min_count == -1, "'min_count' only used in add and prod" + if len(labels) == 0: return @@ -332,7 +343,8 @@ def get_dispatch(dtypes): def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -342,6 +354,8 @@ 
def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[{{dest_type2}}, ndim=2] resx ndarray[int64_t, ndim=2] nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -382,7 +396,8 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels, int64_t rank): + ndarray[int64_t] labels, int64_t rank, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -392,6 +407,8 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[{{dest_type2}}, ndim=2] resx ndarray[int64_t, ndim=2] nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -455,7 +472,8 @@ def get_dispatch(dtypes): def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -464,6 +482,8 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] maxx, nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -526,7 +546,8 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -535,6 +556,8 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] minx, nobs + assert 
min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -686,7 +709,8 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_median_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -695,6 +719,9 @@ def group_median_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] _counts ndarray data float64_t* ptr + + assert min_count == -1, "'min_count' only used in add and prod" + ngroups = len(counts) N, K = (<object> values).shape diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index c96251a0293d6..65e99f5f46fc2 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -19,7 +19,7 @@ from hashtable cimport HashTable from pandas._libs import algos, hashtable as _hash from pandas._libs.tslib import Timestamp, Timedelta -from datetime import datetime, timedelta +from datetime import datetime, timedelta, date from cpython cimport PyTuple_Check, PyList_Check @@ -500,7 +500,7 @@ cpdef convert_scalar(ndarray arr, object value): if arr.descr.type_num == NPY_DATETIME: if isinstance(value, np.ndarray): pass - elif isinstance(value, datetime): + elif isinstance(value, (datetime, np.datetime64, date)): return Timestamp(value).value elif value is None or value != value: return iNaT diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 0dacdf70a71d5..a90039d789972 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -374,6 +374,17 @@ cdef class TextReader: float_precision=None, skip_blank_lines=True): + # set encoding for native Python and C library + if encoding is not None: + if not isinstance(encoding, bytes): + encoding = encoding.encode('utf-8') + encoding = encoding.lower() + self.c_encoding = <char*> encoding + else: + self.c_encoding = 
NULL + + self.encoding = encoding + self.parser = parser_new() self.parser.chunksize = tokenize_chunksize @@ -495,17 +506,6 @@ cdef class TextReader: self.parser.double_converter_nogil = NULL self.parser.double_converter_withgil = round_trip - # encoding - if encoding is not None: - if not isinstance(encoding, bytes): - encoding = encoding.encode('utf-8') - encoding = encoding.lower() - self.c_encoding = <char*> encoding - else: - self.c_encoding = NULL - - self.encoding = encoding - if isinstance(dtype, dict): dtype = {k: pandas_dtype(dtype[k]) for k in dtype} @@ -684,6 +684,14 @@ cdef class TextReader: else: raise ValueError('Unrecognized compression type: %s' % self.compression) + + if b'utf-16' in (self.encoding or b''): + # we need to read utf-16 through UTF8Recoder. + # if source is utf-16, convert source to utf-8 by UTF8Recoder. + source = com.UTF8Recoder(source, self.encoding.decode('utf-8')) + self.encoding = b'utf-8' + self.c_encoding = <char*> self.encoding + self.handle = source if isinstance(source, basestring): @@ -2213,9 +2221,10 @@ def _concatenate_chunks(list chunks): for name in names: arrs = [chunk.pop(name) for chunk in chunks] # Check each arr for consistent types. 
- dtypes = set([a.dtype for a in arrs]) - if len(dtypes) > 1: - common_type = np.find_common_type(dtypes, []) + dtypes = {a.dtype for a in arrs} + numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)} + if len(numpy_dtypes) > 1: + common_type = np.find_common_type(numpy_dtypes, []) if common_type == np.object: warning_columns.append(str(name)) diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index b0a64e1ccc225..c340e870e9722 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -349,13 +349,13 @@ def infer_dtype(object value, bint skipna=False): if values.dtype != np.object_: values = values.astype('O') + # make contiguous + values = values.ravel() + n = len(values) if n == 0: return 'empty' - # make contiguous - values = values.ravel() - # try to use a valid value for i in range(n): val = util.get_value_1d(values, i) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index a0aae6a5de707..20b974ce5a659 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -961,8 +961,7 @@ class NaTType(_NaT): combine = _make_error_func('combine', None) utcnow = _make_error_func('utcnow', None) - if PY3: - timestamp = _make_error_func('timestamp', datetime) + timestamp = _make_error_func('timestamp', Timestamp) # GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or # return NaT create functions that raise, for binding to NaTType @@ -1409,6 +1408,11 @@ cdef class _Timestamp(datetime): def __get__(self): return np.datetime64(self.value, 'ns') + def timestamp(self): + """Return POSIX timestamp as float.""" + # py27 compat, see GH#17329 + return round(self.value / 1e9, 6) + cdef PyTypeObject* ts_type = <PyTypeObject*> Timestamp @@ -3366,7 +3370,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): """ Convert the val (in i8) from timezone1 to timezone2 - This is a single timezone versoin of tz_convert + This is a single timezone version of tz_convert 
Parameters ---------- diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 7f778dde86e23..ba7031bc382b1 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -283,10 +283,9 @@ cdef object get_dst_info(object tz): def infer_tzinfo(start, end): if start is not None and end is not None: tz = start.tzinfo - if end.tzinfo: - if not (get_timezone(tz) == get_timezone(end.tzinfo)): - msg = 'Inputs must both have the same timezone, {tz1} != {tz2}' - raise AssertionError(msg.format(tz1=tz, tz2=end.tzinfo)) + if not (get_timezone(tz) == get_timezone(end.tzinfo)): + msg = 'Inputs must both have the same timezone, {tz1} != {tz2}' + raise AssertionError(msg.format(tz1=tz, tz2=end.tzinfo)) elif start is not None: tz = start.tzinfo elif end is not None: diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index b6bd6f92f6199..3a7a6d54d3851 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -225,14 +225,16 @@ cdef class VariableWindowIndexer(WindowIndexer): right_closed: bint right endpoint closedness True if the right endpoint is closed, False if open - + floor: optional + unit for flooring the unit """ def __init__(self, ndarray input, int64_t win, int64_t minp, - bint left_closed, bint right_closed, ndarray index): + bint left_closed, bint right_closed, ndarray index, + object floor=None): self.is_variable = 1 self.N = len(index) - self.minp = _check_minp(win, minp, self.N) + self.minp = _check_minp(win, minp, self.N, floor=floor) self.start = np.empty(self.N, dtype='int64') self.start.fill(-1) @@ -347,7 +349,7 @@ def get_window_indexer(input, win, minp, index, closed, if index is not None: indexer = VariableWindowIndexer(input, win, minp, left_closed, - right_closed, index) + right_closed, index, floor) elif use_mock: indexer = MockFixedWindowIndexer(input, win, minp, left_closed, right_closed, index, floor) @@ -446,7 +448,7 @@ def roll_sum(ndarray[double_t] input, int64_t win, 
int64_t minp, object index, object closed): cdef: double val, prev_x, sum_x = 0 - int64_t s, e + int64_t s, e, range_endpoint int64_t nobs = 0, i, j, N bint is_variable ndarray[int64_t] start, end @@ -454,7 +456,8 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, start, end, N, win, minp, is_variable = get_window_indexer(input, win, minp, index, - closed) + closed, + floor=0) output = np.empty(N, dtype=float) # for performance we are going to iterate @@ -494,13 +497,15 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, # fixed window + range_endpoint = int_max(minp, 1) - 1 + with nogil: - for i in range(0, minp - 1): + for i in range(0, range_endpoint): add_sum(input[i], &nobs, &sum_x) output[i] = NaN - for i in range(minp - 1, N): + for i in range(range_endpoint, N): val = input[i] add_sum(val, &nobs, &sum_x) @@ -661,9 +666,11 @@ cdef inline void add_var(double val, double *nobs, double *mean_x, if val == val: nobs[0] = nobs[0] + 1 - delta = (val - mean_x[0]) + # a part of Welford's method for the online variance-calculation + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + delta = val - mean_x[0] mean_x[0] = mean_x[0] + delta / nobs[0] - ssqdm_x[0] = ssqdm_x[0] + delta * (val - mean_x[0]) + ssqdm_x[0] = ssqdm_x[0] + ((nobs[0] - 1) * delta ** 2) / nobs[0] cdef inline void remove_var(double val, double *nobs, double *mean_x, @@ -675,9 +682,11 @@ cdef inline void remove_var(double val, double *nobs, double *mean_x, if val == val: nobs[0] = nobs[0] - 1 if nobs[0]: - delta = (val - mean_x[0]) + # a part of Welford's method for the online variance-calculation + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + delta = val - mean_x[0] mean_x[0] = mean_x[0] - delta / nobs[0] - ssqdm_x[0] = ssqdm_x[0] - delta * (val - mean_x[0]) + ssqdm_x[0] = ssqdm_x[0] - ((nobs[0] + 1) * delta ** 2) / nobs[0] else: mean_x[0] = 0 ssqdm_x[0] = 0 @@ -689,7 +698,7 @@ def roll_var(ndarray[double_t] input, int64_t 
win, int64_t minp, Numerically stable implementation using Welford's method. """ cdef: - double val, prev, mean_x = 0, ssqdm_x = 0, nobs = 0, delta + double val, prev, mean_x = 0, ssqdm_x = 0, nobs = 0, delta, mean_x_old int64_t s, e bint is_variable Py_ssize_t i, j, N @@ -749,6 +758,9 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp, add_var(input[i], &nobs, &mean_x, &ssqdm_x) output[i] = calc_var(minp, ddof, nobs, ssqdm_x) + # a part of Welford's method for the online variance-calculation + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + # After the first window, observations can both be added and # removed for i from win <= i < N: @@ -760,10 +772,12 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp, # Adding one observation and removing another one delta = val - prev - prev -= mean_x + mean_x_old = mean_x + mean_x += delta / nobs - val -= mean_x - ssqdm_x += (val + prev) * delta + ssqdm_x += ((nobs - 1) * val + + (nobs + 1) * prev + - 2 * nobs * mean_x_old) * delta / nobs else: add_var(val, &nobs, &mean_x, &ssqdm_x) @@ -788,7 +802,17 @@ cdef inline double calc_skew(int64_t minp, int64_t nobs, double x, double xx, A = x / dnobs B = xx / dnobs - A * A C = xxx / dnobs - A * A * A - 3 * A * B - if B <= 0 or nobs < 3: + + # #18044: with uniform distribution, floating issue will + # cause B != 0. and cause the result is a very + # large number. + # + # in core/nanops.py nanskew/nankurt call the function + # _zero_out_fperr(m2) to fix floating error. + # if the variance is less than 1e-14, it could be + # treat as zero, here we follow the original + # skew/kurt behaviour to check B <= 1e-14 + if B <= 1e-14 or nobs < 3: result = NaN else: R = sqrt(B) @@ -915,7 +939,16 @@ cdef inline double calc_kurt(int64_t minp, int64_t nobs, double x, double xx, R = R * A D = xxxx / dnobs - R - 6 * B * A * A - 4 * C * A - if B == 0 or nobs < 4: + # #18044: with uniform distribution, floating issue will + # cause B != 0. 
and cause the result is a very + # large number. + # + # in core/nanops.py nanskew/nankurt call the function + # _zero_out_fperr(m2) to fix floating error. + # if the variance is less than 1e-14, it could be + # treat as zero, here we follow the original + # skew/kurt behaviour to check B <= 1e-14 + if B <= 1e-14 or nobs < 4: result = NaN else: K = (dnobs * dnobs - 1.) * D / (B * B) - 3 * ((dnobs - 1.) ** 2) diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 3853ac017044c..288d9d7742daf 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -257,6 +257,16 @@ def u(s): def u_safe(s): return s + def to_str(s): + """ + Convert bytes and non-string into Python 3 str + """ + if isinstance(s, binary_type): + s = bytes_to_str(s) + elif not isinstance(s, string_types): + s = str(s) + return s + def strlen(data, encoding=None): # encoding is for compat with PY2 return len(data) @@ -302,6 +312,14 @@ def u_safe(s): except: return s + def to_str(s): + """ + Convert unicode and non-string into Python 2 str + """ + if not isinstance(s, string_types): + s = str(s) + return s + def strlen(data, encoding=None): try: data = data.decode(encoding) @@ -381,17 +399,20 @@ def raise_with_traceback(exc, traceback=Ellipsis): # http://stackoverflow.com/questions/4126348 # Thanks to @martineau at SO -from dateutil import parser as _date_parser import dateutil + +if PY2 and LooseVersion(dateutil.__version__) == '2.0': + # dateutil brokenness + raise Exception('dateutil 2.0 incompatible with Python 2.x, you must ' + 'install version 1.5 or 2.1+!') + +from dateutil import parser as _date_parser if LooseVersion(dateutil.__version__) < '2.0': + @functools.wraps(_date_parser.parse) def parse_date(timestr, *args, **kwargs): timestr = bytes(timestr) return _date_parser.parse(timestr, *args, **kwargs) -elif PY2 and LooseVersion(dateutil.__version__) == '2.0': - # dateutil brokenness - raise Exception('dateutil 2.0 incompatible with Python 2.x, you must ' - 
'install version 1.5 or 2.1+!') else: parse_date = _date_parser.parse diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index e709c771b7d18..c574e6d56916b 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -2268,7 +2268,7 @@ def _recode_for_categories(codes, old_categories, new_categories): if len(old_categories) == 0: # All null anyway, so just retain the nulls - return codes + return codes.copy() indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories), new_categories) new_codes = take_1d(indexer, codes.copy(), fill_value=-1) diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 33531e80449d8..94208a61a4377 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -479,3 +479,29 @@ def use_inf_as_na_cb(key): cf.register_option( 'engine', 'auto', parquet_engine_doc, validator=is_one_of_factory(['auto', 'pyarrow', 'fastparquet'])) + +# -------- +# Plotting +# --------- + +register_converter_doc = """ +: bool + Whether to register converters with matplotlib's units registry for + dates, times, datetimes, and Periods. Toggling to False will remove + the converters, restoring any converters that pandas overwrote. 
+""" + + +def register_converter_cb(key): + from pandas.plotting import register_matplotlib_converters + from pandas.plotting import deregister_matplotlib_converters + + if cf.get_option(key): + register_matplotlib_converters() + else: + deregister_matplotlib_converters() + + +with cf.config_prefix("plotting.matplotlib"): + cf.register_option("register_converters", True, register_converter_doc, + validator=bool, cb=register_converter_cb) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index f3b11e52cdd7a..eae283e9bc00d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -136,7 +136,7 @@ def trans(x): # noqa try: if np.allclose(new_result, result, rtol=0): return new_result - except: + except Exception: # comparison of an object dtype with a number type could # hit here @@ -151,14 +151,14 @@ def trans(x): # noqa elif dtype.kind in ['M', 'm'] and result.dtype.kind in ['i', 'f']: try: result = result.astype(dtype) - except: + except Exception: if dtype.tz: # convert to datetime and change timezone from pandas import to_datetime result = to_datetime(result).tz_localize('utc') result = result.tz_convert(dtype.tz) - except: + except Exception: pass return result @@ -210,7 +210,7 @@ def changeit(): new_result[mask] = om_at result[:] = new_result return result, False - except: + except Exception: pass # we are forced to change the dtype of the result as the input @@ -243,7 +243,7 @@ def changeit(): try: np.place(result, mask, other) - except: + except Exception: return changeit() return result, False @@ -274,14 +274,14 @@ def maybe_promote(dtype, fill_value=np.nan): if issubclass(dtype.type, np.datetime64): try: fill_value = tslib.Timestamp(fill_value).value - except: + except Exception: # the proper thing to do here would probably be to upcast # to object (but numpy 1.6.1 doesn't do this properly) fill_value = iNaT elif issubclass(dtype.type, np.timedelta64): try: fill_value = lib.Timedelta(fill_value).value - except: + except 
Exception: # as for datetimes, cannot upcast to object fill_value = iNaT else: @@ -592,12 +592,12 @@ def maybe_convert_scalar(values): def coerce_indexer_dtype(indexer, categories): """ coerce the indexer input array to the smallest dtype possible """ - l = len(categories) - if l < _int8_max: + length = len(categories) + if length < _int8_max: return _ensure_int8(indexer) - elif l < _int16_max: + elif length < _int16_max: return _ensure_int16(indexer) - elif l < _int32_max: + elif length < _int32_max: return _ensure_int32(indexer) return _ensure_int64(indexer) @@ -629,7 +629,7 @@ def conv(r, dtype): r = float(r) elif dtype.kind == 'i': r = int(r) - except: + except Exception: pass return r @@ -756,7 +756,7 @@ def maybe_convert_objects(values, convert_dates=True, convert_numeric=True, if not isna(new_values).all(): values = new_values - except: + except Exception: pass else: # soft-conversion @@ -817,7 +817,7 @@ def soft_convert_objects(values, datetime=True, numeric=True, timedelta=True, # If all NaNs, then do not-alter values = converted if not isna(converted).all() else values values = values.copy() if copy else values - except: + except Exception: pass return values @@ -888,10 +888,10 @@ def try_datetime(v): try: from pandas import to_datetime return to_datetime(v) - except: + except Exception: pass - except: + except Exception: pass return v.reshape(shape) @@ -903,7 +903,7 @@ def try_timedelta(v): from pandas import to_timedelta try: return to_timedelta(v)._values.reshape(shape) - except: + except Exception: return v.reshape(shape) inferred_type = lib.infer_datetimelike_array(_ensure_object(v)) diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 93993fd0a0cab..bca5847f3a6cc 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -569,9 +569,10 @@ def _concat_rangeindex_same_dtype(indexes): start = step = next = None - for obj in indexes: - if not len(obj): - continue + # Filter the empty indexes + 
non_empty_indexes = [obj for obj in indexes if len(obj)] + + for obj in non_empty_indexes: if start is None: # This is set by the first non-empty index @@ -595,8 +596,16 @@ def _concat_rangeindex_same_dtype(indexes): if step is not None: next = obj[-1] + step - if start is None: + if non_empty_indexes: + # Get the stop value from "next" or alternatively + # from the last non-empty index + stop = non_empty_indexes[-1]._stop if next is None else next + else: + # Here all "indexes" had 0 length, i.e. were empty. + # Simply take start, stop, and step from the last empty index. + obj = indexes[-1] start = obj._start step = obj._step - stop = obj._stop if next is None else next + stop = obj._stop + return indexes[0].__class__(start, stop, step) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a1af806e5cb9e..ad79001e45b86 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -997,7 +997,7 @@ def to_dict(self, orient='dict', into=dict): for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): return [into_c((k, _maybe_box_datetimelike(v)) - for k, v in zip(self.columns, row)) + for k, v in zip(self.columns, np.atleast_1d(row))) for row in self.values] elif orient.lower().startswith('i'): return into_c((k, v.to_dict(into)) for k, v in self.iterrows()) @@ -3751,7 +3751,7 @@ def nlargest(self, n, columns, keep='first'): Number of items to retrieve columns : list or str Column name or names to order by - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. @@ -3788,7 +3788,7 @@ def nsmallest(self, n, columns, keep='first'): Number of items to retrieve columns : list or str Column name or names to order by - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. 
- ``last`` : take the last occurrence. @@ -4035,6 +4035,8 @@ def combine(self, other, func, fill_value=None, overwrite=True): ---------- other : DataFrame func : function + Function that takes two series as inputs and return a Series or a + scalar fill_value : scalar value overwrite : boolean, default True If True then overwrite values for common keys in the calling frame @@ -4042,8 +4044,21 @@ def combine(self, other, func, fill_value=None, overwrite=True): Returns ------- result : DataFrame - """ + Examples + -------- + >>> df1 = DataFrame({'A': [0, 0], 'B': [4, 4]}) + >>> df2 = DataFrame({'A': [1, 1], 'B': [3, 3]}) + >>> df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2) + A B + 0 0 3 + 1 0 3 + + See Also + -------- + DataFrame.combine_first : Combine two DataFrame objects and default to + non-null values in frame calling the method + """ other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) @@ -4131,16 +4146,24 @@ def combine_first(self, other): ---------- other : DataFrame + Returns + ------- + combined : DataFrame + Examples -------- - a's values prioritized, use values from b to fill holes: - - >>> a.combine_first(b) + df1's values prioritized, use values from df2 to fill holes: + >>> df1 = pd.DataFrame([[1, np.nan]]) + >>> df2 = pd.DataFrame([[3, 4]]) + >>> df1.combine_first(df2) + 0 1 + 0 1 4.0 - Returns - ------- - combined : DataFrame + See Also + -------- + DataFrame.combine : Perform series-wise operation on two DataFrames + using a given function """ import pandas.core.computation.expressions as expressions @@ -4283,7 +4306,7 @@ def first_valid_index(self): return valid_indices[0] if len(valid_indices) else None @Appender(_shared_docs['valid_index'] % { - 'position': 'first', 'klass': 'DataFrame'}) + 'position': 'last', 'klass': 'DataFrame'}) def last_valid_index(self): if len(self) == 0: return None @@ -5113,7 +5136,7 @@ def append(self, other, ignore_index=False, verify_integrity=False): >>> 
df = pd.DataFrame(columns=['A']) >>> for i in range(5): - ... df = df.append({'A'}: i}, ignore_index=True) + ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0 @@ -5790,7 +5813,12 @@ def idxmin(self, axis=0, skipna=True): 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result - will be NA + will be NA. + + Raises + ------ + ValueError + * If the row/column is empty Returns ------- @@ -5821,7 +5849,12 @@ def idxmax(self, axis=0, skipna=True): 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result - will be first index. + will be NA. + + Raises + ------ + ValueError + * If the row/column is empty Returns ------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 118e7d5cd437b..31bb9df53ad81 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -49,7 +49,7 @@ from pandas.tseries.frequencies import to_offset from pandas import compat from pandas.compat.numpy import function as nv -from pandas.compat import (map, zip, lzip, lrange, string_types, +from pandas.compat import (map, zip, lzip, lrange, string_types, to_str, isidentifier, set_function_name, cPickle as pkl) from pandas.core.ops import _align_method_FRAME import pandas.core.nanops as nanops @@ -3235,14 +3235,14 @@ def filter(self, items=None, like=None, regex=None, axis=None): **{name: [r for r in items if r in labels]}) elif like: def f(x): - if not isinstance(x, string_types): - x = str(x) - return like in x + return like in to_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: + def f(x): + return matcher.search(to_str(x)) is not None matcher = re.compile(regex) - values = labels.map(lambda x: matcher.search(str(x)) is not None) + values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError('Must pass either `items`, `like`, or 
`regex`') @@ -6921,7 +6921,8 @@ def _add_numeric_operations(cls): @Substitution(outname='mad', desc="Return the mean absolute deviation of the values " "for the requested axis", - name1=name, name2=name2, axis_descr=axis_descr) + name1=name, name2=name2, axis_descr=axis_descr, + min_count='', examples='') @Appender(_num_doc) def mad(self, axis=None, skipna=None, level=None): if skipna is None: @@ -6962,7 +6963,8 @@ def mad(self, axis=None, skipna=None, level=None): @Substitution(outname='compounded', desc="Return the compound percentage of the values for " "the requested axis", name1=name, name2=name2, - axis_descr=axis_descr) + axis_descr=axis_descr, + min_count='', examples='') @Appender(_num_doc) def compound(self, axis=None, skipna=None, level=None): if skipna is None: @@ -6986,10 +6988,10 @@ def compound(self, axis=None, skipna=None, level=None): lambda y, axis: np.maximum.accumulate(y, axis), "max", -np.inf, np.nan) - cls.sum = _make_stat_function( + cls.sum = _make_min_count_stat_function( cls, 'sum', name, name2, axis_descr, 'Return the sum of the values for the requested axis', - nanops.nansum) + nanops.nansum, _sum_examples) cls.mean = _make_stat_function( cls, 'mean', name, name2, axis_descr, 'Return the mean of the values for the requested axis', @@ -7005,10 +7007,10 @@ def compound(self, axis=None, skipna=None, level=None): "by N-1\n", nanops.nankurt) cls.kurtosis = cls.kurt - cls.prod = _make_stat_function( + cls.prod = _make_min_count_stat_function( cls, 'prod', name, name2, axis_descr, 'Return the product of the values for the requested axis', - nanops.nanprod) + nanops.nanprod, _prod_examples) cls.product = cls.prod cls.median = _make_stat_function( cls, 'median', name, name2, axis_descr, @@ -7131,18 +7133,20 @@ def _doc_parms(cls): ---------- axis : %(axis_descr)s skipna : boolean, default True - Exclude NA/null values. If an entire row/column is NA or empty, the result - will be NA + Exclude NA/null values when computing the result. 
level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a %(name1)s numeric_only : boolean, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. +%(min_count)s\ Returns ------- -%(outname)s : %(name1)s or %(name2)s (if level specified)\n""" +%(outname)s : %(name1)s or %(name2)s (if level specified) + +%(examples)s""" _num_ddof_doc = """ @@ -7210,9 +7214,92 @@ def _doc_parms(cls): """ +_sum_examples = """\ +Examples +-------- +By default, the sum of an empty or all-NA Series is ``0``. + +>>> pd.Series([]).sum() # min_count=0 is the default +0.0 + +This can be controlled with the ``min_count`` parameter. For example, if +you'd like the sum of an empty series to be NaN, pass ``min_count=1``. + +>>> pd.Series([]).sum(min_count=1) +nan + +Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and +empty series identically. + +>>> pd.Series([np.nan]).sum() +0.0 + +>>> pd.Series([np.nan]).sum(min_count=1) +nan +""" + +_prod_examples = """\ +Examples +-------- +By default, the product of an empty or all-NA Series is ``1`` + +>>> pd.Series([]).prod() +1.0 + +This can be controlled with the ``min_count`` parameter + +>>> pd.Series([]).prod(min_count=1) +nan + +Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and +empty series identically. + +>>> pd.Series([np.nan]).prod() +1.0 + +>>> pd.Series([np.nan]).sum(min_count=1) +nan +""" + + +_min_count_stub = """\ +min_count : int, default 0 + The required number of valid values to perform the operation. If fewer than + ``min_count`` non-NA values are present the result will be NA. + + .. versionadded :: 0.22.0 + + Added with the default being 1. This means the sum or product + of an all-NA or empty series is ``NaN``. 
+""" + + +def _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc, + f, examples): + @Substitution(outname=name, desc=desc, name1=name1, name2=name2, + axis_descr=axis_descr, min_count=_min_count_stub, + examples=examples) + @Appender(_num_doc) + def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, + min_count=0, + **kwargs): + nv.validate_stat_func(tuple(), kwargs, fname=name) + if skipna is None: + skipna = True + if axis is None: + axis = self._stat_axis_number + if level is not None: + return self._agg_by_level(name, axis=axis, level=level, + skipna=skipna, min_count=min_count) + return self._reduce(f, name, axis=axis, skipna=skipna, + numeric_only=numeric_only, min_count=min_count) + + return set_function_name(stat_func, name, cls) + + def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, - axis_descr=axis_descr) + axis_descr=axis_descr, min_count='', examples='') @Appender(_num_doc) def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 5c07033f5a68f..aef5ff7ba64d3 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -77,6 +77,119 @@ pandas.Panel.%(name)s """ +_apply_docs = dict( + template=""" + Apply function ``func`` group-wise and combine the results together. + + The function passed to ``apply`` must take a {input} as its first + argument and return a dataframe, a series or a scalar. ``apply`` will + then take care of combining the results back together into a single + dataframe or series. ``apply`` is therefore a highly flexible + grouping method. + + While ``apply`` is a very flexible method, its downside is that + using it can be quite a bit slower than using more specific methods. 
+ Pandas offers a wide range of methods that will be much faster + than using ``apply`` for their specific purposes, so try to use them + before reaching for ``apply``. + + Parameters + ---------- + func : function + A callable that takes a {input} as its first argument, and + returns a dataframe, a series or a scalar. In addition the + callable may take positional and keyword arguments + args, kwargs : tuple and dict + Optional positional and keyword arguments to pass to ``func`` + + Returns + ------- + applied : Series or DataFrame + + Notes + ----- + In the current implementation ``apply`` calls func twice on the + first group to decide whether it can take a fast or slow code + path. This can lead to unexpected behavior if func has + side-effects, as they will take effect twice for the first + group. + + Examples + -------- + {examples} + + See also + -------- + pipe : Apply function to the full GroupBy object instead of to each + group. + aggregate, transform + """, + dataframe_examples=""" + >>> df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1,2,3], 'C': [4,6, 5]}) + >>> g = df.groupby('A') + + From ``df`` above we can see that ``g`` has two groups, ``a``, ``b``. + Calling ``apply`` in various ways, we can get different grouping results: + + Example 1: below the function passed to ``apply`` takes a dataframe as + its argument and returns a dataframe. ``apply`` combines the result for + each group together into a new dataframe: + + >>> g.apply(lambda x: x / x.sum()) + B C + 0 0.333333 0.4 + 1 0.666667 0.6 + 2 1.000000 1.0 + + Example 2: The function passed to ``apply`` takes a dataframe as + its argument and returns a series. ``apply`` combines the result for + each group together into a new dataframe: + + >>> g.apply(lambda x: x.max() - x.min()) + B C + A + a 1 2 + b 0 0 + + Example 3: The function passed to ``apply`` takes a dataframe as + its argument and returns a scalar. 
``apply`` combines the result for + each group together into a series, including setting the index as + appropriate: + + >>> g.apply(lambda x: x.C.max() - x.B.min()) + A + a 5 + b 2 + dtype: int64 + """, + series_examples=""" + >>> ser = pd.Series([0, 1, 2], index='a a b'.split()) + >>> g = ser.groupby(ser.index) + + From ``ser`` above we can see that ``g`` has two groups, ``a``, ``b``. + Calling ``apply`` in various ways, we can get different grouping results: + + Example 1: The function passed to ``apply`` takes a series as + its argument and returns a series. ``apply`` combines the result for + each group together into a new series: + + >>> g.apply(lambda x: x*2 if x.name == 'b' else x/2) + 0 0.0 + 1 0.5 + 2 4.0 + dtype: float64 + + Example 2: The function passed to ``apply`` takes a series as + its argument and returns a scalar. ``apply`` combines the result for + each group together into a series, including setting the index as + appropriate: + + >>> g.apply(lambda x: x.max() - x.min()) + a 1 + b 0 + dtype: int64 + """) + _transform_template = """ Call function producing a like-indexed %(klass)s on each group and return a %(klass)s having the same indexes as the original object @@ -144,6 +257,7 @@ """ + # special case to prevent duplicate plots when catching exceptions when # forwarding methods from NDFrames _plotting_methods = frozenset(['plot', 'boxplot', 'hist']) @@ -206,12 +320,13 @@ class Grouper(object): sort : boolean, default to False whether to sort the resulting labels - additional kwargs to control time-like groupers (when freq is passed) + additional kwargs to control time-like groupers (when ``freq`` is passed) - closed : closed end of interval; left or right - label : interval boundary to use for labeling; left or right + closed : closed end of interval; 'left' or 'right' + label : interval boundary to use for labeling; 'left' or 'right' convention : {'start', 'end', 'e', 's'} If grouper is PeriodIndex + base, loffset Returns ------- @@ -233,6 
+348,7 @@ class Grouper(object): >>> df.groupby(Grouper(level='date', freq='60s', axis=1)) """ + _attributes = ('key', 'level', 'freq', 'axis', 'sort') def __new__(cls, *args, **kwargs): if kwargs.get('freq') is not None: @@ -333,6 +449,14 @@ def _set_grouper(self, obj, sort=False): def groups(self): return self.grouper.groups + def __repr__(self): + attrs_list = ["{}={!r}".format(attr_name, getattr(self, attr_name)) + for attr_name in self._attributes + if getattr(self, attr_name) is not None] + attrs = ", ".join(attrs_list) + cls_name = self.__class__.__name__ + return "{}({})".format(cls_name, attrs) + class GroupByPlot(PandasObject): """ @@ -653,50 +777,10 @@ def __iter__(self): """ return self.grouper.get_iterator(self.obj, axis=self.axis) - @Substitution(name='groupby') + @Appender(_apply_docs['template'] + .format(input="dataframe", + examples=_apply_docs['dataframe_examples'])) def apply(self, func, *args, **kwargs): - """ - Apply function and combine results together in an intelligent way. - - The split-apply-combine combination rules attempt to be as common - sense based as possible. For example: - - case 1: - group DataFrame - apply aggregation function (f(chunk) -> Series) - yield DataFrame, with group axis having group labels - - case 2: - group DataFrame - apply transform function ((f(chunk) -> DataFrame with same indexes) - yield DataFrame with resulting chunks glued together - - case 3: - group Series - apply function with f(chunk) -> DataFrame - yield DataFrame with result of chunks glued together - - Parameters - ---------- - func : function - - Notes - ----- - See online documentation for full exposition on how to use apply. - - In the current implementation apply calls func twice on the - first group to decide whether it can take a fast or slow code - path. This can lead to unexpected behavior if func has - side-effects, as they will take effect twice for the first - group. 
- - - See also - -------- - pipe : Apply function to the full GroupBy object instead of to each - group. - aggregate, transform - """ func = self._is_builtin_func(func) @@ -824,7 +908,8 @@ def _cython_transform(self, how, numeric_only=True): return self._wrap_transformed_output(output, names) - def _cython_agg_general(self, how, alt=None, numeric_only=True): + def _cython_agg_general(self, how, alt=None, numeric_only=True, + min_count=-1): output = {} for name, obj in self._iterate_slices(): is_numeric = is_numeric_dtype(obj.dtype) @@ -832,7 +917,8 @@ def _cython_agg_general(self, how, alt=None, numeric_only=True): continue try: - result, names = self.grouper.aggregate(obj.values, how) + result, names = self.grouper.aggregate(obj.values, how, + min_count=min_count) except AssertionError as e: raise GroupByError(str(e)) output[name] = self._try_cast(result, obj) @@ -1139,7 +1225,8 @@ def _add_numeric_operations(cls): """ add numeric operations to the GroupBy generically """ def groupby_function(name, alias, npfunc, - numeric_only=True, _convert=False): + numeric_only=True, _convert=False, + min_count=-1): _local_template = "Compute %(f)s of group values" @@ -1149,6 +1236,8 @@ def groupby_function(name, alias, npfunc, def f(self, **kwargs): if 'numeric_only' not in kwargs: kwargs['numeric_only'] = numeric_only + if 'min_count' not in kwargs: + kwargs['min_count'] = min_count self._set_group_selection() try: return self._cython_agg_general( @@ -1196,8 +1285,8 @@ def last(x): else: return last(x) - cls.sum = groupby_function('sum', 'add', np.sum) - cls.prod = groupby_function('prod', 'prod', np.prod) + cls.sum = groupby_function('sum', 'add', np.sum, min_count=0) + cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0) cls.min = groupby_function('min', 'min', np.min, numeric_only=False) cls.max = groupby_function('max', 'max', np.max, numeric_only=False) cls.first = groupby_function('first', 'first', first_compat, @@ -2023,7 +2112,7 @@ def 
get_group_levels(self): 'var': 'group_var', 'first': { 'name': 'group_nth', - 'f': lambda func, a, b, c, d: func(a, b, c, d, 1) + 'f': lambda func, a, b, c, d, e: func(a, b, c, d, 1, -1) }, 'last': 'group_last', 'ohlc': 'group_ohlc', @@ -2093,7 +2182,7 @@ def wrapper(*args, **kwargs): (how, dtype_str)) return func, dtype_str - def _cython_operation(self, kind, values, how, axis): + def _cython_operation(self, kind, values, how, axis, min_count=-1): assert kind in ['transform', 'aggregate'] # can we do this operation with our cython functions @@ -2178,11 +2267,12 @@ def _cython_operation(self, kind, values, how, axis): counts = np.zeros(self.ngroups, dtype=np.int64) result = self._aggregate( result, counts, values, labels, func, is_numeric, - is_datetimelike) + is_datetimelike, min_count) elif kind == 'transform': result = _maybe_fill(np.empty_like(values, dtype=out_dtype), fill_value=np.nan) + # TODO: min_count result = self._transform( result, values, labels, func, is_numeric, is_datetimelike) @@ -2219,14 +2309,15 @@ def _cython_operation(self, kind, values, how, axis): return result, names - def aggregate(self, values, how, axis=0): - return self._cython_operation('aggregate', values, how, axis) + def aggregate(self, values, how, axis=0, min_count=-1): + return self._cython_operation('aggregate', values, how, axis, + min_count=min_count) def transform(self, values, how, axis=0): return self._cython_operation('transform', values, how, axis) def _aggregate(self, result, counts, values, comp_ids, agg_func, - is_numeric, is_datetimelike): + is_numeric, is_datetimelike, min_count=-1): if values.ndim > 3: # punting for now raise NotImplementedError("number of dimensions is currently " @@ -2235,9 +2326,10 @@ def _aggregate(self, result, counts, values, comp_ids, agg_func, for i, chunk in enumerate(values.transpose(2, 0, 1)): chunk = chunk.squeeze() - agg_func(result[:, :, i], counts, chunk, comp_ids) + agg_func(result[:, :, i], counts, chunk, comp_ids, + min_count) 
else: - agg_func(result, counts, values, comp_ids) + agg_func(result, counts, values, comp_ids, min_count) return result @@ -2847,9 +2939,11 @@ def is_in_obj(gpr): else: in_axis, name = False, None - if is_categorical_dtype(gpr) and len(gpr) != len(obj): - raise ValueError("Categorical dtype grouper must " - "have len(grouper) == len(data)") + if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]: + raise ValueError( + ("Length of grouper ({len_gpr}) and axis ({len_axis})" + " must be same length" + .format(len_gpr=len(gpr), len_axis=obj.shape[axis]))) # create the Grouping # allow us to passing the actual Grouping as the gpr @@ -3011,6 +3105,12 @@ def _selection_name(self): """) + @Appender(_apply_docs['template'] + .format(input='series', + examples=_apply_docs['series_examples'])) + def apply(self, func, *args, **kwargs): + return super(SeriesGroupBy, self).apply(func, *args, **kwargs) + @Appender(_agg_doc) @Appender(_shared_docs['aggregate'] % dict( klass='Series', @@ -3503,9 +3603,10 @@ def _iterate_slices(self): continue yield val, slicer(val) - def _cython_agg_general(self, how, alt=None, numeric_only=True): + def _cython_agg_general(self, how, alt=None, numeric_only=True, + min_count=-1): new_items, new_blocks = self._cython_agg_blocks( - how, alt=alt, numeric_only=numeric_only) + how, alt=alt, numeric_only=numeric_only, min_count=min_count) return self._wrap_agged_blocks(new_items, new_blocks) def _wrap_agged_blocks(self, items, blocks): @@ -3531,7 +3632,8 @@ def _wrap_agged_blocks(self, items, blocks): _block_agg_axis = 0 - def _cython_agg_blocks(self, how, alt=None, numeric_only=True): + def _cython_agg_blocks(self, how, alt=None, numeric_only=True, + min_count=-1): # TODO: the actual managing of mgr_locs is a PITA # here, it should happen via BlockManager.combine @@ -3548,7 +3650,7 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True): locs = block.mgr_locs.as_array try: result, _ = self.grouper.aggregate( - block.values, how, 
axis=agg_axis) + block.values, how, axis=agg_axis, min_count=min_count) except NotImplementedError: # generally if we have numeric_only=False # and non-applicable functions @@ -4363,7 +4465,8 @@ def count(self): ids, _, ngroups = self.grouper.group_info mask = ids != -1 - val = ((mask & ~isna(blk.get_values())) for blk in data.blocks) + val = ((mask & ~isna(np.atleast_2d(blk.get_values()))) + for blk in data.blocks) loc = (blk.mgr_locs for blk in data.blocks) counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a995fc10a6674..83c78f084a9da 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1934,7 +1934,10 @@ def putmask(self, mask, value): try: np.putmask(values, mask, self._convert_for_op(value)) return self._shallow_copy(values) - except (ValueError, TypeError): + except (ValueError, TypeError) as err: + if is_object_dtype(self): + raise err + # coerces to object return self.astype(object).putmask(mask, value) @@ -2032,7 +2035,7 @@ def equals(self, other): try: return array_equivalent(_values_from_object(self), _values_from_object(other)) - except: + except Exception: return False def identical(self, other): @@ -2315,7 +2318,7 @@ def intersection(self, other): try: indexer = Index(other._values).get_indexer(self._values) indexer = indexer.take((indexer != -1).nonzero()[0]) - except: + except Exception: # duplicates indexer = algos.unique1d( Index(other._values).get_indexer_non_unique(self._values)[0]) @@ -3024,13 +3027,13 @@ def _reindex_non_unique(self, target): new_indexer = None if len(missing): - l = np.arange(len(indexer)) + length = np.arange(len(indexer)) missing = _ensure_platform_int(missing) missing_labels = target.take(missing) - missing_indexer = _ensure_int64(l[~check]) + missing_indexer = _ensure_int64(length[~check]) cur_labels = self.take(indexer[check]).values - cur_indexer = _ensure_int64(l[check]) + cur_indexer = 
_ensure_int64(length[check]) new_labels = np.empty(tuple([len(indexer)]), dtype=object) new_labels[cur_indexer] = cur_labels diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 8b680127723c3..70b531ffb0ec4 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -79,7 +79,8 @@ def __new__(cls, data=None, categories=None, ordered=None, dtype=None, if data is not None or categories is None: cls._scalar_data_error(data) data = [] - data = cls._create_categorical(cls, data, categories, ordered) + data = cls._create_categorical(cls, data, categories, ordered, + dtype) if copy: data = data.copy() diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 71de6c7c3e8cf..4e9b2b9a2e922 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -681,7 +681,7 @@ def __sub__(self, other): return self._add_delta(-other) elif is_integer(other): return self.shift(-other) - elif isinstance(other, datetime): + elif isinstance(other, (datetime, np.datetime64)): return self._sub_datelike(other) elif isinstance(other, Period): return self._sub_period(other) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 18be6c61abdf7..3c518017a8808 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -2,9 +2,11 @@ from __future__ import division import operator import warnings -from datetime import time, datetime -from datetime import timedelta +from datetime import time, datetime, timedelta + import numpy as np +from pytz import utc + from pandas.core.base import _shared_docs from pandas.core.dtypes.common import ( @@ -29,6 +31,7 @@ import pandas.core.dtypes.concat as _concat from pandas.errors import PerformanceWarning from pandas.core.common import _values_from_object, _maybe_box +from pandas.core.algorithms import checked_add_with_arr from pandas.core.indexes.base import Index, 
_index_shared_docs from pandas.core.indexes.numeric import Int64Index, Float64Index @@ -55,10 +58,6 @@ from pandas._libs.tslibs import timezones -def _utc(): - import pytz - return pytz.utc - # -------- some conversion wrapper functions @@ -66,7 +65,6 @@ def _field_accessor(name, field, docstring=None): def f(self): values = self.asi8 if self.tz is not None: - utc = _utc() if self.tz is not utc: values = self._local_timestamps() @@ -451,7 +449,7 @@ def _generate(cls, start, end, periods, name, offset, try: inferred_tz = timezones.infer_tzinfo(start, end) - except: + except Exception: raise TypeError('Start and end cannot both be tz-aware with ' 'different timezones') @@ -562,8 +560,6 @@ def _convert_for_op(self, value): raise ValueError('Passed item and index have different timezone') def _local_timestamps(self): - utc = _utc() - if self.is_monotonic: return libts.tz_convert(self.asi8, utc, self.tz) else: @@ -767,7 +763,7 @@ def _sub_datelike(self, other): raise TypeError("DatetimeIndex subtraction must have the same " "timezones or no timezones") result = self._sub_datelike_dti(other) - elif isinstance(other, datetime): + elif isinstance(other, (datetime, np.datetime64)): other = Timestamp(other) if other is libts.NaT: result = self._nat_new(box=False) @@ -777,7 +773,8 @@ def _sub_datelike(self, other): "timezones or no timezones") else: i8 = self.asi8 - result = i8 - other.value + result = checked_add_with_arr(i8, -other.value, + arr_mask=self._isnan) result = self._maybe_mask_results(result, fill_value=libts.iNaT) else: @@ -823,7 +820,6 @@ def _add_delta(self, delta): tz = 'UTC' if self.tz is not None else None result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer') - utc = _utc() if self.tz is not None and self.tz is not utc: result = result.tz_convert(self.tz) return result @@ -877,7 +873,6 @@ def astype(self, dtype, copy=True): raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype) def _get_time_micros(self): - utc = _utc() values = 
self.asi8 if self.tz is not None and self.tz is not utc: values = self._local_timestamps() @@ -1183,12 +1178,12 @@ def __iter__(self): # convert in chunks of 10k for efficiency data = self.asi8 - l = len(self) + length = len(self) chunksize = 10000 - chunks = int(l / chunksize) + 1 + chunks = int(length / chunksize) + 1 for i in range(chunks): start_i = i * chunksize - end_i = min((i + 1) * chunksize, l) + end_i = min((i + 1) * chunksize, length) converted = libts.ints_to_pydatetime(data[start_i:end_i], tz=self.tz, freq=self.freq, box=True) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 7bf7cfce515a1..9619f5403b761 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -179,7 +179,7 @@ def __new__(cls, data, closed='right', if isinstance(data, IntervalIndex): left = data.left right = data.right - + closed = data.closed else: # don't allow scalars @@ -187,7 +187,7 @@ def __new__(cls, data, closed='right', cls._scalar_data_error(data) data = IntervalIndex.from_intervals(data, name=name) - left, right = data.left, data.right + left, right, closed = data.left, data.right, data.closed return cls._simple_new(left, right, closed, name, copy=copy, verify_integrity=verify_integrity) @@ -569,7 +569,8 @@ def copy(self, deep=False, name=None): left = self.left.copy(deep=True) if deep else self.left right = self.right.copy(deep=True) if deep else self.right name = name if name is not None else self.name - return type(self).from_arrays(left, right, name=name) + closed = self.closed + return type(self).from_arrays(left, right, closed=closed, name=name) @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 4cc59f5297058..f4acb6862addb 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -446,6 +446,17 @@ def _shallow_copy_with_infer(self, values=None, **kwargs): **kwargs) return 
self._shallow_copy(values, **kwargs) + @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) + def __contains__(self, key): + hash(key) + try: + self.get_loc(key) + return True + except (LookupError, TypeError): + return False + + contains = __contains__ + @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, values=None, **kwargs): if values is not None: @@ -809,9 +820,10 @@ def duplicated(self, keep='first'): return duplicated_int64(ids, keep) - @Appender(ibase._index_shared_docs['fillna']) def fillna(self, value=None, downcast=None): - # isna is not implemented for MultiIndex + """ + fillna is not implemented for MultiIndex + """ raise NotImplementedError('isna is not defined for MultiIndex') @Appender(_index_shared_docs['dropna']) @@ -1370,17 +1382,6 @@ def nlevels(self): def levshape(self): return tuple(len(x) for x in self.levels) - @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) - def __contains__(self, key): - hash(key) - try: - self.get_loc(key) - return True - except LookupError: - return False - - contains = __contains__ - def __reduce__(self): """Necessary for making this object picklable""" d = dict(levels=[lev for lev in self.levels], diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 6e08c32f30dcd..0cc35300f0d17 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -36,6 +36,26 @@ join as libjoin, Timedelta, NaT, iNaT) +def _field_accessor(name, alias, docstring=None): + def f(self): + if self.hasnans: + result = np.empty(len(self), dtype='float64') + mask = self._isnan + imask = ~mask + result.flat[imask] = np.array([getattr(Timedelta(val), alias) + for val in self.asi8[imask]]) + result[mask] = np.nan + else: + result = np.array([getattr(Timedelta(val), alias) + for val in self.asi8], dtype='int64') + + return Index(result, name=self.name) + + f.__name__ = name + f.__doc__ = docstring + return property(f) + + def 
_td_index_cmp(opname, nat_result=False): """ Wrap comparison operations to convert timedelta-like to timedelta64 @@ -361,7 +381,8 @@ def _add_datelike(self, other): else: other = Timestamp(other) i8 = self.asi8 - result = checked_add_with_arr(i8, other.value) + result = checked_add_with_arr(i8, other.value, + arr_mask=self._isnan) result = self._maybe_mask_results(result, fill_value=iNaT) return DatetimeIndex(result, name=self.name, copy=False) @@ -380,46 +401,17 @@ def _format_native_types(self, na_rep=u('NaT'), nat_rep=na_rep, justify='all').get_result() - def _get_field(self, m): - - values = self.asi8 - hasnans = self.hasnans - if hasnans: - result = np.empty(len(self), dtype='float64') - mask = self._isnan - imask = ~mask - result.flat[imask] = np.array( - [getattr(Timedelta(val), m) for val in values[imask]]) - result[mask] = np.nan - else: - result = np.array([getattr(Timedelta(val), m) - for val in values], dtype='int64') - return Index(result, name=self.name) - - @property - def days(self): - """ Number of days for each element. """ - return self._get_field('days') - - @property - def seconds(self): - """ Number of seconds (>= 0 and less than 1 day) for each element. """ - return self._get_field('seconds') - - @property - def microseconds(self): - """ - Number of microseconds (>= 0 and less than 1 second) for each - element. """ - return self._get_field('microseconds') - - @property - def nanoseconds(self): - """ - Number of nanoseconds (>= 0 and less than 1 microsecond) for each - element. - """ - return self._get_field('nanoseconds') + days = _field_accessor("days", "days", + " Number of days for each element. ") + seconds = _field_accessor("seconds", "seconds", + " Number of seconds (>= 0 and less than 1 day) " + "for each element. ") + microseconds = _field_accessor("microseconds", "microseconds", + "\nNumber of microseconds (>= 0 and less " + "than 1 second) for each\nelement. 
") + nanoseconds = _field_accessor("nanoseconds", "nanoseconds", + "\nNumber of nanoseconds (>= 0 and less " + "than 1 microsecond) for each\nelement.\n") @property def components(self): @@ -850,7 +842,7 @@ def insert(self, loc, item): if _is_convertible_to_td(item): try: item = Timedelta(item) - except: + except Exception: pass freq = None diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 045580d393b26..3b7cd1d02e1d3 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1837,8 +1837,10 @@ def _can_hold_element(self, element): if tipo is not None: return (issubclass(tipo.type, (np.floating, np.integer)) and not issubclass(tipo.type, (np.datetime64, np.timedelta64))) - return (isinstance(element, (float, int, np.floating, np.int_)) and - not isinstance(element, (bool, np.bool_, datetime, timedelta, + return ( + isinstance( + element, (float, int, np.floating, np.int_, compat.long)) + and not isinstance(element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))) def to_native_types(self, slicer=None, na_rep='', float_format=None, @@ -1886,9 +1888,11 @@ def _can_hold_element(self, element): if tipo is not None: return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating)) - return (isinstance(element, - (float, int, complex, np.float_, np.int_)) and - not isinstance(element, (bool, np.bool_))) + return ( + isinstance( + element, + (float, int, complex, np.float_, np.int_, compat.long)) + and not isinstance(element, (bool, np.bool_))) def should_store(self, value): return issubclass(value.dtype.type, np.complexfloating) @@ -1946,7 +1950,8 @@ def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, np.timedelta64) - return isinstance(element, (timedelta, np.timedelta64)) + return is_integer(element) or isinstance( + element, (timedelta, np.timedelta64)) def fillna(self, value, **kwargs): diff --git a/pandas/core/nanops.py 
b/pandas/core/nanops.py index baeb869239c1e..d1a355021f388 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -107,21 +107,14 @@ def f(values, axis=None, skipna=True, **kwds): if k not in kwds: kwds[k] = v try: - if values.size == 0: - - # we either return np.nan or pd.NaT - if is_numeric_dtype(values): - values = values.astype('float64') - fill_value = na_value_for_dtype(values.dtype) - - if values.ndim == 1: - return fill_value - else: - result_shape = (values.shape[:axis] + - values.shape[axis + 1:]) - result = np.empty(result_shape, dtype=values.dtype) - result.fill(fill_value) - return result + if values.size == 0 and kwds.get('min_count') is None: + # We are empty, returning NA for our type + # Only applies for the default `min_count` of None + # since that affects how empty arrays are handled. + # TODO(GH-18976) update all the nanops methods to + # correctly handle empty inputs and remove this check. + # It *may* just be `var` + return _na_for_min_count(values, axis) if (_USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name)): @@ -292,6 +285,36 @@ def _wrap_results(result, dtype): return result +def _na_for_min_count(values, axis): + """Return the missing value for `values` + + Parameters + ---------- + values : ndarray + axis : int or None + axis for the reduction + + Returns + ------- + result : scalar or ndarray + For 1-D values, returns a scalar of the correct missing type. + For 2-D values, returns a 1-D array where each element is missing. 
+ """ + # we either return np.nan or pd.NaT + if is_numeric_dtype(values): + values = values.astype('float64') + fill_value = na_value_for_dtype(values.dtype) + + if values.ndim == 1: + return fill_value + else: + result_shape = (values.shape[:axis] + + values.shape[axis + 1:]) + result = np.empty(result_shape, dtype=values.dtype) + result.fill(fill_value) + return result + + def nanany(values, axis=None, skipna=True): values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna) return values.any(axis) @@ -304,7 +327,7 @@ def nanall(values, axis=None, skipna=True): @disallow('M8') @bottleneck_switch() -def nansum(values, axis=None, skipna=True): +def nansum(values, axis=None, skipna=True, min_count=0): values, mask, dtype, dtype_max = _get_values(values, skipna, 0) dtype_sum = dtype_max if is_float_dtype(dtype): @@ -312,7 +335,7 @@ def nansum(values, axis=None, skipna=True): elif is_timedelta64_dtype(dtype): dtype_sum = np.float64 the_sum = values.sum(axis, dtype=dtype_sum) - the_sum = _maybe_null_out(the_sum, axis, mask) + the_sum = _maybe_null_out(the_sum, axis, mask, min_count=min_count) return _wrap_results(the_sum, dtype) @@ -548,6 +571,9 @@ def nanskew(values, axis=None, skipna=True): m3 = adjusted3.sum(axis, dtype=np.float64) # floating point error + # + # #18044 in _libs/windows.pyx calc_skew follow this behavior + # to fix the fperr to treat m2 <1e-14 as zero m2 = _zero_out_fperr(m2) m3 = _zero_out_fperr(m3) @@ -609,6 +635,9 @@ def nankurt(values, axis=None, skipna=True): result = numer / denom - adj # floating point error + # + # #18044 in _libs/windows.pyx calc_kurt follow this behavior + # to fix the fperr to treat denom <1e-14 as zero numer = _zero_out_fperr(numer) denom = _zero_out_fperr(denom) @@ -635,13 +664,13 @@ def nankurt(values, axis=None, skipna=True): @disallow('M8', 'm8') -def nanprod(values, axis=None, skipna=True): +def nanprod(values, axis=None, skipna=True, min_count=0): mask = isna(values) if skipna and not 
is_any_int_dtype(values): values = values.copy() values[mask] = 1 result = values.prod(axis) - return _maybe_null_out(result, axis, mask) + return _maybe_null_out(result, axis, mask, min_count=min_count) def _maybe_arg_null_out(result, axis, mask, skipna): @@ -677,9 +706,9 @@ def _get_counts(mask, axis, dtype=float): return np.array(count, dtype=dtype) -def _maybe_null_out(result, axis, mask): +def _maybe_null_out(result, axis, mask, min_count=1): if axis is not None and getattr(result, 'ndim', False): - null_mask = (mask.shape[axis] - mask.sum(axis)) == 0 + null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0 if np.any(null_mask): if is_numeric_dtype(result): if np.iscomplexobj(result): @@ -692,13 +721,14 @@ def _maybe_null_out(result, axis, mask): result[null_mask] = None elif result is not tslib.NaT: null_mask = mask.size - mask.sum() - if null_mask == 0: + if null_mask < min_count: result = np.nan return result def _zero_out_fperr(arg): + # #18044 reference this behavior to fix rolling skew/kurt issue if isinstance(arg, np.ndarray): with np.errstate(invalid='ignore'): return np.where(np.abs(arg) < 1e-14, 0, arg) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 6edbb99641542..db1d3d4c5e31b 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -395,7 +395,11 @@ def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs): grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis) try: - result = grouped.aggregate(how, *args, **kwargs) + if isinstance(obj, ABCDataFrame) and compat.callable(how): + # Check if the function is reducing or not. 
+ result = grouped._aggregate_item_by_item(how, *args, **kwargs) + else: + result = grouped.aggregate(how, *args, **kwargs) except Exception: # we have a non-reducing function @@ -597,9 +601,20 @@ def size(self): Resampler._deprecated_valids += dir(Resampler) + # downsample methods -for method in ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem', - 'median', 'prod', 'ohlc']: +for method in ['sum', 'prod']: + + def f(self, _method=method, min_count=0, *args, **kwargs): + nv.validate_resampler_func(_method, args, kwargs) + return self._downsample(_method, min_count=min_count) + f.__doc__ = getattr(GroupBy, method).__doc__ + setattr(Resampler, method, f) + + +# downsample methods +for method in ['min', 'max', 'first', 'last', 'mean', 'sem', + 'median', 'ohlc']: def f(self, _method=method, *args, **kwargs): nv.validate_resampler_func(_method, args, kwargs) @@ -1010,22 +1025,18 @@ class TimeGrouper(Grouper): Parameters ---------- freq : pandas date offset or offset alias for identifying bin edges - closed : closed end of interval; left or right - label : interval boundary to use for labeling; left or right - nperiods : optional, integer + closed : closed end of interval; 'left' or 'right' + label : interval boundary to use for labeling; 'left' or 'right' convention : {'start', 'end', 'e', 's'} If axis is PeriodIndex - - Notes - ----- - Use begin, end, nperiods to generate intervals that cannot be derived - directly from the associated object """ + _attributes = Grouper._attributes + ('closed', 'label', 'how', + 'loffset', 'kind', 'convention', + 'base') def __init__(self, freq='Min', closed=None, label=None, how='mean', - nperiods=None, axis=0, - fill_method=None, limit=None, loffset=None, kind=None, - convention=None, base=0, **kwargs): + axis=0, fill_method=None, limit=None, loffset=None, + kind=None, convention=None, base=0, **kwargs): freq = to_offset(freq) end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W']) @@ -1044,7 +1055,6 @@ def __init__(self, 
freq='Min', closed=None, label=None, how='mean', self.closed = closed self.label = label - self.nperiods = nperiods self.kind = kind self.convention = convention or 'E' @@ -1137,6 +1147,16 @@ def _get_time_bins(self, ax): tz=tz, name=ax.name) + # GH 15549 + # In edge case of tz-aware resapmling binner last index can be + # less than the last variable in data object, this happens because of + # DST time change + if len(binner) > 1 and binner[-1] < last: + extra_date_range = pd.date_range(binner[-1], last + self.freq, + freq=self.freq, tz=tz, + name=ax.name) + binner = labels = binner.append(extra_date_range[1:]) + # a little hack trimmed = False if (len(binner) > 2 and binner[-2] == last and diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index e409090e76944..bdb7ec00a29fd 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -126,7 +126,7 @@ def _groupby_and_merge(by, on, left, right, _merge_pieces, try: if k in merged: merged[k] = key - except: + except KeyError: pass pieces.append(merged) @@ -1253,10 +1253,12 @@ def _get_merge_keys(self): join_names) = super(_AsOfMerge, self)._get_merge_keys() # validate index types are the same - for lk, rk in zip(left_join_keys, right_join_keys): + for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)): if not is_dtype_equal(lk.dtype, rk.dtype): - raise MergeError("incompatible merge keys, " - "must be the same type") + raise MergeError("incompatible merge keys [{i}] {lkdtype} and " + "{rkdtype}, must be the same type" + .format(i=i, lkdtype=lk.dtype, + rkdtype=rk.dtype)) # validate tolerance; must be a Timedelta if we have a DTI if self.tolerance is not None: @@ -1266,8 +1268,10 @@ def _get_merge_keys(self): else: lt = left_join_keys[-1] - msg = "incompatible tolerance, must be compat " \ - "with type {lt}".format(lt=type(lt)) + msg = ("incompatible tolerance {tolerance}, must be compat " + "with type {lkdtype}".format( + tolerance=type(self.tolerance), + 
lkdtype=lt.dtype)) if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt): if not isinstance(self.tolerance, Timedelta): @@ -1503,12 +1507,12 @@ def _sort_labels(uniques, left, right): # tuplesafe uniques = Index(uniques).values - l = len(left) + llength = len(left) labels = np.concatenate([left, right]) _, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1) new_labels = _ensure_int64(new_labels) - new_left, new_right = new_labels[:l], new_labels[l:] + new_left, new_right = new_labels[:llength], new_labels[llength:] return new_left, new_right @@ -1525,7 +1529,8 @@ def _get_join_keys(llab, rlab, shape, sort): rkey = stride * rlab[0].astype('i8', subok=False, copy=False) for i in range(1, nlev): - stride //= shape[i] + with np.errstate(divide='ignore'): + stride //= shape[i] lkey += llab[i] * stride rkey += rlab[i] * stride diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index fda339aa30461..2adf17a227a59 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -148,7 +148,7 @@ def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'): Parameters ---------- - x : ndarray or Series + x : 1d ndarray or Series q : integer or array of quantiles Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] 
for quartiles diff --git a/pandas/core/series.py b/pandas/core/series.py index 1c92c4b8850ee..2b4f9c4c6f7e3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -597,7 +597,7 @@ def _ixs(self, i, axis=0): return values[i] except IndexError: raise - except: + except Exception: if isinstance(i, slice): indexer = self.index._convert_slice_indexer(i, kind='iloc') return self._get_values(indexer) @@ -675,7 +675,7 @@ def _get_with(self, key): if isinstance(key, tuple): try: return self._get_values_tuple(key) - except: + except Exception: if len(key) == 1: key = key[0] if isinstance(key, slice): @@ -818,7 +818,7 @@ def _set_with(self, key, value): if not isinstance(key, (list, Series, np.ndarray, Series)): try: key = list(key) - except: + except Exception: key = [key] if isinstance(key, Index): @@ -1306,7 +1306,13 @@ def idxmin(self, axis=None, skipna=True, *args, **kwargs): Parameters ---------- skipna : boolean, default True - Exclude NA/null values + Exclude NA/null values. If the entire Series is NA, the result + will be NA. + + Raises + ------ + ValueError + * If the Series is empty Returns ------- @@ -1336,7 +1342,13 @@ def idxmax(self, axis=None, skipna=True, *args, **kwargs): Parameters ---------- skipna : boolean, default True - Exclude NA/null values + Exclude NA/null values. If the entire Series is NA, the result + will be NA. 
+ + Raises + ------ + ValueError + * If the Series is empty Returns ------- @@ -1731,11 +1743,26 @@ def combine(self, other, func, fill_value=np.nan): ---------- other : Series or scalar value func : function + Function that takes two scalars as inputs and return a scalar fill_value : scalar value Returns ------- result : Series + + Examples + -------- + >>> s1 = Series([1, 2]) + >>> s2 = Series([0, 3]) + >>> s1.combine(s2, lambda x1, x2: x1 if x1 < x2 else x2) + 0 0 + 1 2 + dtype: int64 + + See Also + -------- + Series.combine_first : Combine Series values, choosing the calling + Series's values first """ if isinstance(other, Series): new_index = self.index.union(other.index) @@ -1764,7 +1791,21 @@ def combine_first(self, other): Returns ------- - y : Series + combined : Series + + Examples + -------- + >>> s1 = pd.Series([1, np.nan]) + >>> s2 = pd.Series([3, 4]) + >>> s1.combine_first(s2) + 0 1.0 + 1 4.0 + dtype: float64 + + See Also + -------- + Series.combine : Perform elementwise operation on two Series + using a given function """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) @@ -1982,7 +2023,7 @@ def nlargest(self, n=5, keep='first'): ---------- n : int Return this many descending sorted values - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. @@ -2029,7 +2070,7 @@ def nsmallest(self, n=5, keep='first'): ---------- n : int Return this many ascending sorted values - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. 
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index abef6f6086dbd..9614641aa1abf 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1423,6 +1423,10 @@ def cons_row(x): return [x] result = [cons_row(x) for x in result] + if result: + # propogate nan values to match longest sequence (GH 18450) + max_len = max(len(x) for x in result) + result = [x * max_len if x[0] is np.nan else x for x in result] if not isinstance(expand, bool): raise ValueError("expand must be True or False") diff --git a/pandas/core/window.py b/pandas/core/window.py index 5143dddc5e866..345f9b035a36b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -503,6 +503,9 @@ class Window(_Window): * ``general_gaussian`` (needs power, width) * ``slepian`` (needs width). + If ``win_type=None`` all points are evenly weighted. To learn more about + different window types see `scipy.signal window functions + <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__. 
""" def validate(self): diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index c5d4a0ecf44ab..bac5ac762400d 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -45,7 +45,6 @@ import pandas as pd import numpy as np -import itertools import csv from functools import partial @@ -891,6 +890,7 @@ def get_col_type(dtype): name = any(self.frame.index.names) cname = any(self.frame.columns.names) lastcol = self.frame.index.nlevels - 1 + previous_lev3 = None for i, lev in enumerate(self.frame.index.levels): lev2 = lev.format() blank = ' ' * len(lev2[0]) @@ -901,11 +901,19 @@ def get_col_type(dtype): lev3 = [blank] * clevels if name: lev3.append(lev.name) - for level_idx, group in itertools.groupby( - self.frame.index.labels[i]): - count = len(list(group)) - lev3.extend([lev2[level_idx]] + [blank] * (count - 1)) + current_idx_val = None + for level_idx in self.frame.index.labels[i]: + if ((previous_lev3 is None or + previous_lev3[len(lev3)].isspace()) and + lev2[level_idx] == current_idx_val): + # same index as above row and left index was the same + lev3.append(blank) + else: + # different value than above or left index different + lev3.append(lev2[level_idx]) + current_idx_val = lev2[level_idx] strcols.insert(i, lev3) + previous_lev3 = lev3 column_format = self.column_format if column_format is None: @@ -942,8 +950,8 @@ def get_col_type(dtype): if self.longtable: buf.write('\\endhead\n') buf.write('\\midrule\n') - buf.write('\\multicolumn{3}{r}{{Continued on next ' - 'page}} \\\\\n') + buf.write('\\multicolumn{{{n}}}{{r}}{{{{Continued on next ' + 'page}}}} \\\\\n'.format(n=len(row))) buf.write('\\midrule\n') buf.write('\\endfoot\n\n') buf.write('\\bottomrule\n') @@ -1695,7 +1703,7 @@ def _save_header(self): else: encoded_labels = [] - if not has_mi_columns: + if not has_mi_columns or has_aliases: encoded_labels += list(write_cols) writer.writerow(encoded_labels) else: diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 
b4dc9173f11ba..caa67d1ce6bce 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -29,9 +29,8 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, The main method a user calls to execute a Query in Google BigQuery and read results into a pandas DataFrame. - Google BigQuery API Client Library v2 for Python is used. - Documentation is available `here - <https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__ + This function requires the `pandas-gbq package + <https://pandas-gbq.readthedocs.io>`__. Authentication to the Google BigQuery service is via OAuth 2.0. @@ -70,7 +69,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. - 'standard' : Use BigQuery's standard SQL (beta), which is + 'standard' : Use BigQuery's standard SQL, which is compliant with the SQL 2011 standard. For more information see `BigQuery SQL Reference <https://cloud.google.com/bigquery/sql-reference/>`__ diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index be39f4baba0fb..203b1d62fcbf3 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -5,7 +5,7 @@ import pandas._libs.json as json from pandas._libs.tslib import iNaT -from pandas.compat import StringIO, long, u +from pandas.compat import StringIO, long, u, to_str from pandas import compat, isna from pandas import Series, DataFrame, to_datetime, MultiIndex from pandas.io.common import (get_filepath_or_buffer, _get_handle, @@ -458,8 +458,10 @@ def read(self): if self.lines and self.chunksize: obj = concat(self) elif self.lines: + + data = to_str(self.data) obj = self._get_object_parser( - self._combine_lines(self.data.split('\n')) + self._combine_lines(data.split('\n')) ) else: obj = self._get_object_parser(self.data) @@ -612,7 +614,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: dtype = np.dtype(dtype) return data.astype(dtype), True - except: 
+ except (TypeError, ValueError): return data, False if convert_dates: @@ -628,7 +630,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: data = data.astype('float64') result = True - except: + except (TypeError, ValueError): pass if data.dtype.kind == 'f': @@ -639,7 +641,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: data = data.astype('float64') result = True - except: + except (TypeError, ValueError): pass # do't coerce 0-len data @@ -651,7 +653,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, if (new_data == data).all(): data = new_data result = True - except: + except (TypeError, ValueError): pass # coerce ints to 64 @@ -661,7 +663,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: data = data.astype('int64') result = True - except: + except (TypeError, ValueError): pass return data, result @@ -680,7 +682,7 @@ def _try_convert_to_date(self, data): if new_data.dtype == 'object': try: new_data = data.astype('int64') - except: + except (TypeError, ValueError): pass # ignore numbers that are out of range @@ -697,7 +699,7 @@ def _try_convert_to_date(self, data): unit=date_unit) except ValueError: continue - except: + except Exception: break return new_data, True return data, False diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index e811dd1eab142..23d2f730d070c 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -181,7 +181,7 @@ def _pull_field(js, spec): return result - if isinstance(data, list) and len(data) is 0: + if isinstance(data, list) and not data: return DataFrame() # A bit of a hackjob @@ -207,9 +207,7 @@ def _pull_field(js, spec): elif not isinstance(meta, list): meta = [meta] - for i, x in enumerate(meta): - if not isinstance(x, list): - meta[i] = [x] + meta = [m if isinstance(m, list) else [m] for m in meta] # Disastrously inefficient for now records = [] diff --git a/pandas/io/msgpack/_packer.pyx b/pandas/io/msgpack/_packer.pyx index 
fd3f4612fb432..f175a6743f44b 100644 --- a/pandas/io/msgpack/_packer.pyx +++ b/pandas/io/msgpack/_packer.pyx @@ -8,6 +8,7 @@ from libc.limits cimport * from pandas.io.msgpack.exceptions import PackValueError from pandas.io.msgpack import ExtType +import numpy as np cdef extern from "../../src/msgpack/pack.h": @@ -133,7 +134,7 @@ cdef class Packer(object): while True: if o is None: ret = msgpack_pack_nil(&self.pk) - elif isinstance(o, bool): + elif isinstance(o, (bool, np.bool_)): if o: ret = msgpack_pack_true(&self.pk) else: diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 92270b39f56ef..abd258034af99 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -350,8 +350,11 @@ def unconvert(values, dtype, compress=None): ) # fall through to copying `np.fromstring` - # Copy the string into a numpy array. - return np.fromstring(values, dtype=dtype) + # Copy the bytes into a numpy array. + buf = np.frombuffer(values, dtype=dtype) + buf = buf.copy() # required to not mutate the original data + buf.flags.writeable = True + return buf def encode(obj): diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 4b507b7f5df6f..eaaa14e756e22 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -3,7 +3,8 @@ from warnings import catch_warnings from distutils.version import LooseVersion from pandas import DataFrame, RangeIndex, Int64Index, get_option -from pandas.compat import range +from pandas.compat import string_types +from pandas.core.common import AbstractMethodError from pandas.io.common import get_filepath_or_buffer @@ -25,6 +26,11 @@ def get_engine(engine): except ImportError: pass + raise ImportError("Unable to find a usable engine; " + "tried using: 'pyarrow', 'fastparquet'.\n" + "pyarrow or fastparquet is required for parquet " + "support") + if engine not in ['pyarrow', 'fastparquet']: raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") @@ -34,37 +40,75 @@ def get_engine(engine): return FastParquetImpl() -class 
PyArrowImpl(object): +class BaseImpl(object): + + api = None # module + + @staticmethod + def validate_dataframe(df): + + if not isinstance(df, DataFrame): + raise ValueError("to_parquet only supports IO with DataFrames") + + # must have value column names (strings only) + if df.columns.inferred_type not in {'string', 'unicode'}: + raise ValueError("parquet must have string column names") + + # index level names must be strings + valid_names = all( + isinstance(name, string_types) + for name in df.index.names + if name is not None + ) + if not valid_names: + raise ValueError("Index level names must be strings") + + def write(self, df, path, compression, **kwargs): + raise AbstractMethodError(self) + + def read(self, path, columns=None, **kwargs): + raise AbstractMethodError(self) + + +class PyArrowImpl(BaseImpl): def __init__(self): # since pandas is a dependency of pyarrow # we need to import on first use - try: import pyarrow import pyarrow.parquet except ImportError: - raise ImportError("pyarrow is required for parquet support\n\n" - "you can install via conda\n" - "conda install pyarrow -c conda-forge\n" - "\nor via pip\n" - "pip install -U pyarrow\n") - + raise ImportError( + "pyarrow is required for parquet support\n\n" + "you can install via conda\n" + "conda install pyarrow -c conda-forge\n" + "\nor via pip\n" + "pip install -U pyarrow\n" + ) if LooseVersion(pyarrow.__version__) < '0.4.1': - raise ImportError("pyarrow >= 0.4.1 is required for parquet" - "support\n\n" - "you can install via conda\n" - "conda install pyarrow -c conda-forge\n" - "\nor via pip\n" - "pip install -U pyarrow\n") - - self._pyarrow_lt_050 = LooseVersion(pyarrow.__version__) < '0.5.0' - self._pyarrow_lt_060 = LooseVersion(pyarrow.__version__) < '0.6.0' + raise ImportError( + "pyarrow >= 0.4.1 is required for parquet support\n\n" + "you can install via conda\n" + "conda install pyarrow -c conda-forge\n" + "\nor via pip\n" + "pip install -U pyarrow\n" + ) + + self._pyarrow_lt_060 = ( + 
LooseVersion(pyarrow.__version__) < LooseVersion('0.6.0')) + self._pyarrow_lt_070 = ( + LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0')) + self.api = pyarrow def write(self, df, path, compression='snappy', coerce_timestamps='ms', **kwargs): + self.validate_dataframe(df) + if self._pyarrow_lt_070: + self._validate_write_lt_070(df) path, _, _ = get_filepath_or_buffer(path) + if self._pyarrow_lt_060: table = self.api.Table.from_pandas(df, timestamps_to_ms=True) self.api.parquet.write_table( @@ -76,37 +120,77 @@ def write(self, df, path, compression='snappy', table, path, compression=compression, coerce_timestamps=coerce_timestamps, **kwargs) - def read(self, path): + def read(self, path, columns=None, **kwargs): path, _, _ = get_filepath_or_buffer(path) - return self.api.parquet.read_table(path).to_pandas() - - -class FastParquetImpl(object): + if self._pyarrow_lt_070: + return self.api.parquet.read_pandas(path, columns=columns, + **kwargs).to_pandas() + kwargs['use_pandas_metadata'] = True + return self.api.parquet.read_table(path, columns=columns, + **kwargs).to_pandas() + + def _validate_write_lt_070(self, df): + # Compatibility shim for pyarrow < 0.7.0 + # TODO: Remove in pandas 0.22.0 + from pandas.core.indexes.multi import MultiIndex + if isinstance(df.index, MultiIndex): + msg = ( + "Multi-index DataFrames are only supported " + "with pyarrow >= 0.7.0" + ) + raise ValueError(msg) + # Validate index + if not isinstance(df.index, Int64Index): + msg = ( + "pyarrow < 0.7.0 does not support serializing {} for the " + "index; you can .reset_index() to make the index into " + "column(s), or install the latest version of pyarrow or " + "fastparquet." 
+ ) + raise ValueError(msg.format(type(df.index))) + if not df.index.equals(RangeIndex(len(df))): + raise ValueError( + "pyarrow < 0.7.0 does not support serializing a non-default " + "index; you can .reset_index() to make the index into " + "column(s), or install the latest version of pyarrow or " + "fastparquet." + ) + if df.index.name is not None: + raise ValueError( + "pyarrow < 0.7.0 does not serialize indexes with a name; you " + "can set the index.name to None or install the latest version " + "of pyarrow or fastparquet." + ) + + +class FastParquetImpl(BaseImpl): def __init__(self): # since pandas is a dependency of fastparquet # we need to import on first use - try: import fastparquet except ImportError: - raise ImportError("fastparquet is required for parquet support\n\n" - "you can install via conda\n" - "conda install fastparquet -c conda-forge\n" - "\nor via pip\n" - "pip install -U fastparquet") - + raise ImportError( + "fastparquet is required for parquet support\n\n" + "you can install via conda\n" + "conda install fastparquet -c conda-forge\n" + "\nor via pip\n" + "pip install -U fastparquet" + ) if LooseVersion(fastparquet.__version__) < '0.1.0': - raise ImportError("fastparquet >= 0.1.0 is required for parquet " - "support\n\n" - "you can install via conda\n" - "conda install fastparquet -c conda-forge\n" - "\nor via pip\n" - "pip install -U fastparquet") - + raise ImportError( + "fastparquet >= 0.1.0 is required for parquet " + "support\n\n" + "you can install via conda\n" + "conda install fastparquet -c conda-forge\n" + "\nor via pip\n" + "pip install -U fastparquet" + ) self.api = fastparquet def write(self, df, path, compression='snappy', **kwargs): + self.validate_dataframe(df) # thriftpy/protocol/compact.py:339: # DeprecationWarning: tostring() is deprecated. # Use tobytes() instead. 
@@ -115,9 +199,10 @@ def write(self, df, path, compression='snappy', **kwargs): self.api.write(path, df, compression=compression, **kwargs) - def read(self, path): + def read(self, path, columns=None, **kwargs): path, _, _ = get_filepath_or_buffer(path) - return self.api.ParquetFile(path).to_pandas() + parquet_file = self.api.ParquetFile(path) + return parquet_file.to_pandas(columns=columns, **kwargs) def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): @@ -138,47 +223,11 @@ def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): kwargs Additional keyword arguments passed to the engine """ - impl = get_engine(engine) + return impl.write(df, path, compression=compression, **kwargs) - if not isinstance(df, DataFrame): - raise ValueError("to_parquet only support IO with DataFrames") - - valid_types = {'string', 'unicode'} - - # validate index - # -------------- - - # validate that we have only a default index - # raise on anything else as we don't serialize the index - - if not isinstance(df.index, Int64Index): - raise ValueError("parquet does not support serializing {} " - "for the index; you can .reset_index()" - "to make the index into column(s)".format( - type(df.index))) - if not df.index.equals(RangeIndex.from_range(range(len(df)))): - raise ValueError("parquet does not support serializing a " - "non-default index for the index; you " - "can .reset_index() to make the index " - "into column(s)") - - if df.index.name is not None: - raise ValueError("parquet does not serialize index meta-data on a " - "default index") - - # validate columns - # ---------------- - - # must have value column names (strings only) - if df.columns.inferred_type not in valid_types: - raise ValueError("parquet must have string column names") - - return impl.write(df, path, compression=compression) - - -def read_parquet(path, engine='auto', **kwargs): +def read_parquet(path, engine='auto', columns=None, **kwargs): """ Load a parquet object from the 
file path, returning a DataFrame. @@ -188,6 +237,10 @@ def read_parquet(path, engine='auto', **kwargs): ---------- path : string File path + columns: list, default=None + If not None, only these columns will be read from the file. + + .. versionadded 0.21.1 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet reader library to use. If 'auto', then the option 'io.parquet.engine' is used. If 'auto', then the first @@ -201,4 +254,4 @@ def read_parquet(path, engine='auto', **kwargs): """ impl = get_engine(engine) - return impl.read(path) + return impl.read(path, columns=columns, **kwargs) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 1b6414ea974fa..df8b1b5cca1d3 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -74,15 +74,19 @@ .. versionadded:: 0.18.1 support for the Python parser. header : int or list of ints, default 'infer' - Row number(s) to use as the column names, and the start of the data. - Default behavior is as if set to 0 if no ``names`` passed, otherwise - ``None``. Explicitly pass ``header=0`` to be able to replace existing - names. The header can be a list of integers that specify row locations for - a multi-index on the columns e.g. [0,1,3]. Intervening rows that are not - specified will be skipped (e.g. 2 in this example is skipped). Note that - this parameter ignores commented lines and empty lines if - ``skip_blank_lines=True``, so header=0 denotes the first line of data - rather than the first line of the file. + Row number(s) to use as the column names, and the start of the + data. Default behavior is to infer the column names: if no names + are passed the behavior is identical to ``header=0`` and column + names are inferred from the first line of the file, if column + names are passed explicitly then the behavior is identical to + ``header=None``. Explicitly pass ``header=0`` to be able to + replace existing names. 
The header can be a list of integers that + specify row locations for a multi-index on the columns + e.g. [0,1,3]. Intervening rows that are not specified will be + skipped (e.g. 2 in this example is skipped). Note that this + parameter ignores commented lines and empty lines if + ``skip_blank_lines=True``, so header=0 denotes the first line of + data rather than the first line of the file. names : array-like, default None List of column names to use. If file contains no header row, then you should explicitly pass header=None. Duplicates in this list will cause @@ -1231,6 +1235,8 @@ def __init__(self, kwds): self.na_values = kwds.get('na_values') self.na_fvalues = kwds.get('na_fvalues') + self.na_filter = kwds.get('na_filter', False) + self.true_values = kwds.get('true_values') self.false_values = kwds.get('false_values') self.as_recarray = kwds.get('as_recarray', False) @@ -1404,7 +1410,6 @@ def _make_index(self, data, alldata, columns, indexnamerow=False): elif not self._has_complex_date_col: index = self._get_simple_index(alldata, columns) index = self._agg_index(index) - elif self._has_complex_date_col: if not self._name_processed: (self.index_names, _, @@ -1487,8 +1492,12 @@ def _agg_index(self, index, try_parse_dates=True): if (try_parse_dates and self._should_parse_dates(i)): arr = self._date_conv(arr) - col_na_values = self.na_values - col_na_fvalues = self.na_fvalues + if self.na_filter: + col_na_values = self.na_values + col_na_fvalues = self.na_fvalues + else: + col_na_values = set() + col_na_fvalues = set() if isinstance(self.na_values, dict): col_name = self.index_names[i] @@ -1671,7 +1680,9 @@ def __init__(self, src, **kwds): ParserBase.__init__(self, kwds) - if 'utf-16' in (kwds.get('encoding') or ''): + if (kwds.get('compression') is None and + 'utf-16' in (kwds.get('encoding') or '')): + # if source is utf-16 plain text, convert source to utf-8 if isinstance(src, compat.string_types): src = open(src, 'rb') self.handles.append(src) @@ -2041,8 
+2052,6 @@ def __init__(self, f, **kwds): self.names_passed = kwds['names'] or None - self.na_filter = kwds['na_filter'] - self.has_index_names = False if 'has_index_names' in kwds: self.has_index_names = kwds['has_index_names'] diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 40955c50f6b5f..2a1aaf2f66469 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2137,10 +2137,17 @@ def convert(self, values, nan_rep, encoding): # if we have stored a NaN in the categories # then strip it; in theory we could have BOTH # -1s in the codes and nulls :< - mask = isna(categories) - if mask.any(): - categories = categories[~mask] - codes[codes != -1] -= mask.astype(int).cumsum().values + if categories is None: + # Handle case of NaN-only categorical columns in which case + # the categories are an empty array; when this is stored, + # pytables cannot write a zero-len array, so on readback + # the categories would be None and `read_hdf()` would fail. + categories = Index([], dtype=np.float64) + else: + mask = isna(categories) + if mask.any(): + categories = categories[~mask] + codes[codes != -1] -= mask.astype(int).cumsum().values self.data = Categorical.from_codes(codes, categories=categories, diff --git a/pandas/io/sql.py b/pandas/io/sql.py index c42c19e1357bc..a9b4f504dd624 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -103,12 +103,12 @@ def _handle_date_column(col, utc=None, format=None): if isinstance(format, dict): return to_datetime(col, errors='ignore', **format) else: - if format in ['D', 's', 'ms', 'us', 'ns']: - return to_datetime(col, errors='coerce', unit=format, utc=utc) - elif (issubclass(col.dtype.type, np.floating) or - issubclass(col.dtype.type, np.integer)): - # parse dates as timestamp - format = 's' if format is None else format + # Allow passing of formatting string for integers + # GH17855 + if format is None and (issubclass(col.dtype.type, np.floating) or + issubclass(col.dtype.type, np.integer)): + format = 's' + if 
format in ['D', 'd', 'h', 'm', 's', 'ms', 'us', 'ns']: return to_datetime(col, errors='coerce', unit=format, utc=utc) elif is_datetime64tz_dtype(col): # coerce to UTC timezone diff --git a/pandas/io/stata.py b/pandas/io/stata.py index afc1631a947c8..aafe5f2ce76bd 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -306,11 +306,11 @@ def convert_delta_safe(base, deltas, unit): data_col[bad_locs] = 1.0 # Replace with NaT dates = dates.astype(np.int64) - if fmt in ["%tc", "tc"]: # Delta ms relative to base + if fmt.startswith(("%tc", "tc")): # Delta ms relative to base base = stata_epoch ms = dates conv_dates = convert_delta_safe(base, ms, 'ms') - elif fmt in ["%tC", "tC"]: + elif fmt.startswith(("%tC", "tC")): from warnings import warn warn("Encountered %tC format. Leaving in Stata Internal Format.") @@ -318,27 +318,30 @@ def convert_delta_safe(base, deltas, unit): if has_bad_values: conv_dates[bad_locs] = pd.NaT return conv_dates - elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base + # Delta days relative to base + elif fmt.startswith(("%td", "td", "%d", "d")): base = stata_epoch days = dates conv_dates = convert_delta_safe(base, days, 'd') - elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week + # does not count leap days - 7 days is a week. 
+ # 52nd week may have more than 7 days + elif fmt.startswith(("%tw", "tw")): year = stata_epoch.year + dates // 52 days = (dates % 52) * 7 conv_dates = convert_year_days_safe(year, days) - elif fmt in ["%tm", "tm"]: # Delta months relative to base + elif fmt.startswith(("%tm", "tm")): # Delta months relative to base year = stata_epoch.year + dates // 12 month = (dates % 12) + 1 conv_dates = convert_year_month_safe(year, month) - elif fmt in ["%tq", "tq"]: # Delta quarters relative to base + elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base year = stata_epoch.year + dates // 4 month = (dates % 4) * 3 + 1 conv_dates = convert_year_month_safe(year, month) - elif fmt in ["%th", "th"]: # Delta half-years relative to base + elif fmt.startswith(("%th", "th")): # Delta half-years relative to base year = stata_epoch.year + dates // 2 month = (dates % 2) * 6 + 1 conv_dates = convert_year_month_safe(year, month) - elif fmt in ["%ty", "ty"]: # Years -- not delta + elif fmt.startswith(("%ty", "ty")): # Years -- not delta year = dates month = np.ones_like(dates) conv_dates = convert_year_month_safe(year, month) @@ -1029,10 +1032,6 @@ def _read_header(self): # calculate size of a data record self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist) - # remove format details from %td - self.fmtlist = ["%td" if x.startswith("%td") else x - for x in self.fmtlist] - def _read_new_header(self, first_char): # The first part of the header is common to 117 and 118. 
self.path_or_buf.read(27) # stata_dta><header><release> @@ -1578,7 +1577,8 @@ def read(self, nrows=None, convert_dates=None, self._do_convert_missing(data, convert_missing) if convert_dates: - cols = np.where(lmap(lambda x: x in _date_formats, + cols = np.where(lmap(lambda x: any(x.startswith(fmt) + for fmt in _date_formats), self.fmtlist))[0] for i in cols: col = data.columns[i] diff --git a/pandas/plotting/__init__.py b/pandas/plotting/__init__.py index 8f98e297e3e66..385d4d7f047c7 100644 --- a/pandas/plotting/__init__.py +++ b/pandas/plotting/__init__.py @@ -11,3 +11,10 @@ from pandas.plotting._core import boxplot from pandas.plotting._style import plot_params from pandas.plotting._tools import table +try: + from pandas.plotting._converter import \ + register as register_matplotlib_converters + from pandas.plotting._converter import \ + deregister as deregister_matplotlib_converters +except ImportError: + pass diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 47d15195315ba..357e84d1f17ea 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -1,3 +1,4 @@ +import warnings from datetime import datetime, timedelta import datetime as pydt import numpy as np @@ -44,14 +45,96 @@ MUSEC_PER_DAY = 1e6 * SEC_PER_DAY +_WARN = True # Global for whether pandas has registered the units explicitly +_mpl_units = {} # Cache for units overwritten by us -def register(): - units.registry[lib.Timestamp] = DatetimeConverter() - units.registry[Period] = PeriodConverter() - units.registry[pydt.datetime] = DatetimeConverter() - units.registry[pydt.date] = DatetimeConverter() - units.registry[pydt.time] = TimeConverter() - units.registry[np.datetime64] = DatetimeConverter() + +def get_pairs(): + pairs = [ + (lib.Timestamp, DatetimeConverter), + (Period, PeriodConverter), + (pydt.datetime, DatetimeConverter), + (pydt.date, DatetimeConverter), + (pydt.time, TimeConverter), + (np.datetime64, DatetimeConverter), + ] + return pairs + + 
+def register(explicit=True): + """Register Pandas Formatters and Converters with matplotlib + + This function modifies the global ``matplotlib.units.registry`` + dictionary. Pandas adds custom converters for + + * pd.Timestamp + * pd.Period + * np.datetime64 + * datetime.datetime + * datetime.date + * datetime.time + + See Also + -------- + deregister_matplotlib_converter + """ + # Renamed in pandas.plotting.__init__ + global _WARN + + if explicit: + _WARN = False + + pairs = get_pairs() + for type_, cls in pairs: + converter = cls() + if type_ in units.registry: + previous = units.registry[type_] + _mpl_units[type_] = previous + units.registry[type_] = converter + + +def deregister(): + """Remove pandas' formatters and converters + + Removes the custom converters added by :func:`register`. This + attempts to set the state of the registry back to the state before + pandas registered its own units. Converters for pandas' own types like + Timestamp and Period are removed completely. Converters for types + pandas overwrites, like ``datetime.datetime``, are restored to their + original value. + + See Also + -------- + deregister_matplotlib_converters + """ + # Renamed in pandas.plotting.__init__ + for type_, cls in get_pairs(): + # We use type to catch our classes directly, no inheritance + if type(units.registry.get(type_)) is cls: + units.registry.pop(type_) + + # restore the old keys + for unit, formatter in _mpl_units.items(): + if type(formatter) not in {DatetimeConverter, PeriodConverter, + TimeConverter}: + # make it idempotent by excluding ours. + units.registry[unit] = formatter + + +def _check_implicitly_registered(): + global _WARN + + if _WARN: + msg = ("Using an implicitly registered datetime converter for a " + "matplotlib plotting method. The converter was registered " + "by pandas on import. 
Future versions of pandas will require " + "you to explicitly register matplotlib converters.\n\n" + "To register the converters:\n\t" + ">>> from pandas.plotting import register_matplotlib_converters" + "\n\t" + ">>> register_matplotlib_converters()") + warnings.warn(msg, FutureWarning) + _WARN = False def _to_ordinalf(tm): @@ -189,6 +272,7 @@ class DatetimeConverter(dates.DateConverter): @staticmethod def convert(values, unit, axis): # values might be a 1-d array, or a list-like of arrays. + _check_implicitly_registered() if is_nested_list_like(values): values = [DatetimeConverter._convert_1d(v, unit, axis) for v in values] @@ -273,6 +357,7 @@ class PandasAutoDateLocator(dates.AutoDateLocator): def get_locator(self, dmin, dmax): 'Pick the best locator based on a distance.' + _check_implicitly_registered() delta = relativedelta(dmax, dmin) num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days @@ -314,6 +399,7 @@ def get_unit_generic(freq): def __call__(self): # if no data have been set, this will tank with a ValueError + _check_implicitly_registered() try: dmin, dmax = self.viewlim_to_dt() except ValueError: @@ -914,6 +1000,8 @@ def _get_default_locs(self, vmin, vmax): def __call__(self): 'Return the locations of the ticks.' # axis calls Locator.set_axis inside set_m<xxxx>_formatter + _check_implicitly_registered() + vi = tuple(self.axis.get_view_interval()) if vi != self.plot_obj.view_interval: self.plot_obj.date_axis_info = None @@ -998,6 +1086,8 @@ def set_locs(self, locs): 'Sets the locations of the ticks' # don't actually use the locs. This is just needed to work with # matplotlib. 
Force to use vmin, vmax + _check_implicitly_registered() + self.locs = locs (vmin, vmax) = vi = tuple(self.axis.get_view_interval()) @@ -1009,6 +1099,8 @@ def set_locs(self, locs): self._set_default_format(vmin, vmax) def __call__(self, x, pos=0): + _check_implicitly_registered() + if self.formatdict is None: return '' else: @@ -1039,6 +1131,7 @@ def format_timedelta_ticks(x, pos, n_decimals): return s def __call__(self, x, pos=0): + _check_implicitly_registered() (vmin, vmax) = tuple(self.axis.get_view_interval()) n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin)))) if n_decimals > 9: diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 43f33cf30dea1..e1380953e4519 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -11,6 +11,7 @@ from pandas.util._decorators import cache_readonly from pandas.core.base import PandasObject +from pandas.core.config import get_option from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike from pandas.core.dtypes.common import ( is_list_like, @@ -40,16 +41,13 @@ _get_xlim, _set_ticks_props, format_date_labels) -_registered = False - - -def _setup(): - # delay the import of matplotlib until nescessary - global _registered - if not _registered: - from pandas.plotting import _converter - _converter.register() - _registered = True +try: + from pandas.plotting import _converter +except ImportError: + pass +else: + if get_option('plotting.matplotlib.register_converters'): + _converter.register(explicit=True) def _get_standard_kind(kind): @@ -99,7 +97,7 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=None, secondary_y=False, colormap=None, table=False, layout=None, **kwds): - _setup() + _converter._WARN = False self.data = data self.by = by @@ -383,12 +381,16 @@ def _add_table(self): def _post_plot_logic_common(self, ax, data): """Common post process for each axes""" - labels = [pprint_thing(key) for key in data.index] - labels = 
dict(zip(range(len(data.index)), labels)) + + def get_label(i): + try: + return pprint_thing(data.index[i]) + except Exception: + return '' if self.orientation == 'vertical' or self.orientation is None: if self._need_to_set_index: - xticklabels = [labels.get(x, '') for x in ax.get_xticks()] + xticklabels = [get_label(x) for x in ax.get_xticks()] ax.set_xticklabels(xticklabels) self._apply_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize) @@ -400,7 +402,7 @@ def _post_plot_logic_common(self, ax, data): elif self.orientation == 'horizontal': if self._need_to_set_index: - yticklabels = [labels.get(y, '') for y in ax.get_yticks()] + yticklabels = [get_label(y) for y in ax.get_yticks()] ax.set_yticklabels(yticklabels) self._apply_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize) @@ -2059,7 +2061,7 @@ def boxplot_frame(self, column=None, by=None, ax=None, fontsize=None, rot=0, grid=True, figsize=None, layout=None, return_type=None, **kwds): import matplotlib.pyplot as plt - _setup() + _converter._WARN = False ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize, grid=grid, rot=rot, figsize=figsize, layout=layout, return_type=return_type, **kwds) @@ -2155,7 +2157,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, kwds : other plotting keyword arguments To be passed to hist function """ - _setup() + _converter._WARN = False if by is not None: axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize, sharex=sharex, sharey=sharey, @@ -2289,6 +2291,8 @@ def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, ------- axes: collection of Matplotlib Axes """ + _converter._WARN = False + def plot_group(group, ax): ax.hist(group.dropna().values, bins=bins, **kwargs) @@ -2352,7 +2356,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1) >>> boxplot_frame_groupby(grouped, 
subplots=False) """ - _setup() + _converter._WARN = False if subplots is True: naxes = len(grouped) fig, axes = _subplots(naxes=naxes, squeeze=False, diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 3d04973ed0009..56b5311326e98 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -1,5 +1,7 @@ # TODO: Use the fact that axis can have units to simplify the process +import functools + import numpy as np from matplotlib import pylab @@ -293,6 +295,10 @@ def format_timedelta_ticks(x, pos, n_decimals): return s +def _format_coord(freq, t, y): + return "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y) + + def format_dateaxis(subplot, freq, index): """ Pretty-formats the date axis (x-axis). @@ -327,8 +333,7 @@ def format_dateaxis(subplot, freq, index): subplot.xaxis.set_minor_formatter(minformatter) # x and y coord info - subplot.format_coord = lambda t, y: ( - "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y)) + subplot.format_coord = functools.partial(_format_coord, freq) elif isinstance(index, TimedeltaIndex): subplot.xaxis.set_major_formatter( diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py index d9fb458c83529..82a35fa711e8c 100644 --- a/pandas/tests/dtypes/test_cast.py +++ b/pandas/tests/dtypes/test_cast.py @@ -38,17 +38,17 @@ def test_downcast_conv(self): arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]) result = maybe_downcast_to_dtype(arr, 'infer') - assert (np.array_equal(result, arr)) + tm.assert_numpy_array_equal(result, arr) arr = np.array([8., 8., 8., 8., 8.9999999999995]) result = maybe_downcast_to_dtype(arr, 'infer') - expected = np.array([8, 8, 8, 8, 9]) - assert (np.array_equal(result, expected)) + expected = np.array([8, 8, 8, 8, 9], dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) arr = np.array([8., 8., 8., 8., 9.0000000000005]) result = maybe_downcast_to_dtype(arr, 'infer') - expected = np.array([8, 8, 8, 
8, 9]) - assert (np.array_equal(result, expected)) + expected = np.array([8, 8, 8, 8, 9], dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) # GH16875 coercing of bools ser = Series([True, True, False]) diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 70273f9e999cf..7195cb43a70dc 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -416,6 +416,12 @@ def test_length_zero(self): result = lib.infer_dtype([]) assert result == 'empty' + # GH 18004 + arr = np.array([np.array([], dtype=object), + np.array([], dtype=object)]) + result = lib.infer_dtype(arr) + assert result == 'empty' + def test_integers(self): arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O') result = lib.infer_dtype(arr) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 1bac4037e99c9..97ab0deb50d50 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -440,7 +440,8 @@ def test_nunique(self): Series({0: 1, 1: 3, 2: 2})) def test_sum(self): - self._check_stat_op('sum', np.sum, has_numeric_only=True) + self._check_stat_op('sum', np.sum, has_numeric_only=True, + skipna_alternative=np.nansum) # mixed types (with upcasting happening) self._check_stat_op('sum', np.sum, @@ -716,7 +717,8 @@ def alt(x): def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, has_numeric_only=False, check_dtype=True, - check_dates=False, check_less_precise=False): + check_dates=False, check_less_precise=False, + skipna_alternative=None): if frame is None: frame = self.frame # set some NAs @@ -737,15 +739,11 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, assert len(result) if has_skipna: - def skipna_wrapper(x): - nona = x.dropna() - if len(nona) == 0: - return np.nan - return alternative(nona) - def wrapper(x): return alternative(x.values) + skipna_wrapper = 
tm._make_skipna_wrapper(alternative, + skipna_alternative) result0 = f(axis=0, skipna=False) result1 = f(axis=1, skipna=False) tm.assert_series_equal(result0, frame.apply(wrapper), @@ -797,8 +795,11 @@ def wrapper(x): r0 = getattr(all_na, name)(axis=0) r1 = getattr(all_na, name)(axis=1) if name in ['sum', 'prod']: - assert np.isnan(r0).all() - assert np.isnan(r1).all() + unit = int(name == 'prod') + expected = pd.Series(unit, index=r0.index, dtype=r0.dtype) + tm.assert_series_equal(r0, expected) + expected = pd.Series(unit, index=r1.index, dtype=r1.dtype) + tm.assert_series_equal(r1, expected) def test_mode(self): df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11], @@ -936,6 +937,66 @@ def test_sum_corner(self): assert len(axis0) == 0 assert len(axis1) == 0 + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_sum_prod_nanops(self, method, unit): + idx = ['a', 'b', 'c'] + df = pd.DataFrame({"a": [unit, unit], + "b": [unit, np.nan], + "c": [np.nan, np.nan]}) + # The default + result = getattr(df, method) + expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') + + # min_count=1 + result = getattr(df, method)(min_count=1) + expected = pd.Series([unit, unit, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = getattr(df, method)(min_count=0) + expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') + tm.assert_series_equal(result, expected) + + result = getattr(df.iloc[1:], method)(min_count=1) + expected = pd.Series([unit, np.nan, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + # min_count > 1 + df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) + result = getattr(df, method)(min_count=5) + expected = pd.Series(result, index=['A', 'B']) + tm.assert_series_equal(result, expected) + + result = getattr(df, method)(min_count=6) + expected = pd.Series(result, index=['A', 'B']) + tm.assert_series_equal(result, expected) + + def 
test_sum_nanops_timedelta(self): + # prod isn't defined on timedeltas + idx = ['a', 'b', 'c'] + df = pd.DataFrame({"a": [0, 0], + "b": [0, np.nan], + "c": [np.nan, np.nan]}) + + df2 = df.apply(pd.to_timedelta) + + # 0 by default + result = df2.sum() + expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df2.sum(min_count=0) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df2.sum(min_count=1) + expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx) + tm.assert_series_equal(result, expected) + def test_sum_object(self): values = self.frame.values.astype(int) frame = DataFrame(values, index=self.frame.index, diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 1e2f630401c89..343e235fb741c 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -884,6 +884,27 @@ def test_filter_regex_search(self): exp = df[[x for x in df.columns if 'BB' in x]] assert_frame_equal(result, exp) + @pytest.mark.parametrize('name,expected', [ + ('a', DataFrame({u'a': [1, 2]})), + (u'a', DataFrame({u'a': [1, 2]})), + (u'あ', DataFrame({u'あ': [3, 4]})) + ]) + def test_filter_unicode(self, name, expected): + # GH13101 + df = DataFrame({u'a': [1, 2], u'あ': [3, 4]}) + + assert_frame_equal(df.filter(like=name), expected) + assert_frame_equal(df.filter(regex=name), expected) + + @pytest.mark.parametrize('name', ['a', u'a']) + def test_filter_bytestring(self, name): + # GH13101 + df = DataFrame({b'a': [1, 2], b'b': [3, 4]}) + expected = DataFrame({b'a': [1, 2]}) + + assert_frame_equal(df.filter(like=name), expected) + assert_frame_equal(df.filter(regex=name), expected) + def test_filter_corner(self): empty = DataFrame() diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index c55c79ef18602..8291e9d452348 100644 --- 
a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1913,10 +1913,11 @@ def test_from_records_len0_with_columns(self): # #2633 result = DataFrame.from_records([], index='foo', columns=['foo', 'bar']) + expected = Index(['bar']) - assert np.array_equal(result.columns, ['bar']) assert len(result) == 0 assert result.index.name == 'foo' + tm.assert_index_equal(result.columns, expected) def test_to_frame_with_falsey_names(self): # GH 16114 diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py index 5bdb76494f4c8..7d2d18db8d41c 100644 --- a/pandas/tests/frame/test_convert_to.py +++ b/pandas/tests/frame/test_convert_to.py @@ -1,6 +1,9 @@ # -*- coding: utf-8 -*- +from datetime import datetime + import pytest +import pytz import collections import numpy as np @@ -249,3 +252,18 @@ def test_to_dict_box_scalars(self): result = DataFrame(d).to_dict(orient='records') assert isinstance(result[0]['a'], (int, long)) + + def test_frame_to_dict_tz(self): + # GH18372 When converting to dict with orient='records' columns of + # datetime that are tz-aware were not converted to required arrays + data = [(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),), + (datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc,),)] + df = DataFrame(list(data), columns=["d", ]) + + result = df.to_dict(orient='records') + expected = [ + {'d': Timestamp('2017-11-18 21:53:00.219225+0000', tz=pytz.utc)}, + {'d': Timestamp('2017-11-18 22:06:30.061810+0000', tz=pytz.utc)}, + ] + tm.assert_dict_equal(result[0], expected[0]) + tm.assert_dict_equal(result[1], expected[1]) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index abb528f0d2179..5adcd3b6855ce 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -10,6 +10,8 @@ from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp, compat, concat, option_context) from pandas.compat import u 
+from pandas import _np_version_under1p14 + from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.tests.frame.common import TestData from pandas.util.testing import (assert_series_equal, @@ -531,7 +533,12 @@ def test_astype_str(self): assert_frame_equal(result, expected) result = DataFrame([1.12345678901234567890]).astype(tt) - expected = DataFrame(['1.12345678901']) + if _np_version_under1p14: + # < 1.14 truncates + expected = DataFrame(['1.12345678901']) + else: + # >= 1.14 preserves the full repr + expected = DataFrame(['1.1234567890123457']) assert_frame_equal(result, expected) @pytest.mark.parametrize("dtype_class", [dict, Series]) diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py index 4f77ba0ae1f5a..5b903c5a1eaf6 100644 --- a/pandas/tests/frame/test_nonunique_indexes.py +++ b/pandas/tests/frame/test_nonunique_indexes.py @@ -448,7 +448,7 @@ def test_as_matrix_duplicates(self): expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']], dtype=object) - assert np.array_equal(result, expected) + tm.assert_numpy_array_equal(result, expected) def test_set_value_by_index(self): # See gh-12344 diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 4162a586f8063..ca8a0d8bda3ab 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -1203,3 +1203,16 @@ def test_period_index_date_overflow(self): expected = ',0\n1990-01-01,4\n,5\n3005-01-01,6\n' assert result == expected + + def test_multi_index_header(self): + # see gh-5539 + columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), + ("b", 1), ("b", 2)]) + df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) + df.columns = columns + + header = ["a", "b", "c", "d"] + result = df.to_csv(header=header) + + expected = ",a,b,c,d\n0,1,2,3,4\n1,5,6,7,8\n" + assert result == expected diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py index 
913d3bcc09869..ad1a322fdaae9 100644 --- a/pandas/tests/groupby/test_aggregate.py +++ b/pandas/tests/groupby/test_aggregate.py @@ -809,26 +809,60 @@ def test__cython_agg_general(self): exc.args += ('operation: %s' % op, ) raise - def test_cython_agg_empty_buckets(self): - ops = [('mean', np.mean), - ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), - ('var', lambda x: np.var(x, ddof=1)), - ('add', lambda x: np.sum(x) if len(x) > 0 else np.nan), - ('prod', np.prod), - ('min', np.min), - ('max', np.max), ] - + @pytest.mark.parametrize('op, targop', [ + ('mean', np.mean), + ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), + ('var', lambda x: np.var(x, ddof=1)), + ('min', np.min), + ('max', np.max), ] + ) + def test_cython_agg_empty_buckets(self, op, targop): df = pd.DataFrame([11, 12, 13]) grps = range(0, 55, 5) - for op, targop in ops: - result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op) - expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) - try: - tm.assert_frame_equal(result, expected) - except BaseException as exc: - exc.args += ('operation: %s' % op,) - raise + # calling _cython_agg_general directly, instead of via the user API + # which sets different values for min_count, so do that here. 
+ result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op) + expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) + try: + tm.assert_frame_equal(result, expected) + except BaseException as exc: + exc.args += ('operation: %s' % op,) + raise + + def test_cython_agg_empty_buckets_nanops(self): + # GH-18869 can't call nanops on empty groups, so hardcode expected + # for these + df = pd.DataFrame([11, 12, 13], columns=['a']) + grps = range(0, 25, 5) + # add / sum + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('add') + intervals = pd.interval_range(0, 20, freq=5) + expected = pd.DataFrame( + {"a": [0, 0, 36, 0]}, + index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) + + # prod + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('prod') + expected = pd.DataFrame( + {"a": [1, 1, 1716, 1]}, + index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) + + @pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.") + def test_agg_category_nansum(self): + categories = ['a', 'b', 'c'] + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=categories), + 'B': [1, 2, 3]}) + result = df.groupby("A").B.agg(np.nansum) + expected = pd.Series([3, 3, 0], + index=pd.CategoricalIndex(['a', 'b', 'c'], + categories=categories, + name='A'), + name='B') + tm.assert_series_equal(result, expected) def test_agg_over_numpy_arrays(self): # GH 3788 diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index fdc03acd3e931..d4f35aa8755d1 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -17,6 +17,142 @@ class TestGroupByCategorical(MixIn): + def test_groupby(self): + + cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], + categories=["a", "b", "c", "d"], ordered=True) + data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 
3, 4, 5], "b": cats}) + + exp_index = CategoricalIndex(list('abcd'), name='b', ordered=True) + expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index) + result = data.groupby("b").mean() + tm.assert_frame_equal(result, expected) + + raw_cat1 = Categorical(["a", "a", "b", "b"], + categories=["a", "b", "z"], ordered=True) + raw_cat2 = Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]}) + + # single grouper + gb = df.groupby("A") + exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True) + expected = DataFrame({'values': Series([3, 7, 0], index=exp_idx)}) + result = gb.sum() + tm.assert_frame_equal(result, expected) + + # multiple groupers + gb = df.groupby(['A', 'B']) + exp_index = pd.MultiIndex.from_product( + [Categorical(["a", "b", "z"], ordered=True), + Categorical(["c", "d", "y"], ordered=True)], + names=['A', 'B']) + expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan, + np.nan, np.nan, np.nan]}, + index=exp_index) + result = gb.sum() + tm.assert_frame_equal(result, expected) + + # multiple groupers with a non-cat + df = df.copy() + df['C'] = ['foo', 'bar'] * 2 + gb = df.groupby(['A', 'B', 'C']) + exp_index = pd.MultiIndex.from_product( + [Categorical(["a", "b", "z"], ordered=True), + Categorical(["c", "d", "y"], ordered=True), + ['foo', 'bar']], + names=['A', 'B', 'C']) + expected = DataFrame({'values': Series( + np.nan, index=exp_index)}).sort_index() + expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4] + result = gb.sum() + tm.assert_frame_equal(result, expected) + + # GH 8623 + x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'], + [1, 'John P. 
Doe']], + columns=['person_id', 'person_name']) + x['person_name'] = Categorical(x.person_name) + + g = x.groupby(['person_id']) + result = g.transform(lambda x: x) + tm.assert_frame_equal(result, x[['person_name']]) + + result = x.drop_duplicates('person_name') + expected = x.iloc[[0, 1]] + tm.assert_frame_equal(result, expected) + + def f(x): + return x.drop_duplicates('person_name').iloc[0] + + result = g.apply(f) + expected = x.iloc[[0, 1]].copy() + expected.index = Index([1, 2], name='person_id') + expected['person_name'] = expected['person_name'].astype('object') + tm.assert_frame_equal(result, expected) + + # GH 9921 + # Monotonic + df = DataFrame({"a": [5, 15, 25]}) + c = pd.cut(df.a, bins=[0, 10, 20, 30, 40]) + + result = df.a.groupby(c).transform(sum) + tm.assert_series_equal(result, df['a']) + + tm.assert_series_equal( + df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) + tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) + tm.assert_frame_equal( + df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']]) + + # Filter + tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a']) + tm.assert_frame_equal(df.groupby(c).filter(np.all), df) + + # Non-monotonic + df = DataFrame({"a": [5, 15, 25, -5]}) + c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40]) + + result = df.a.groupby(c).transform(sum) + tm.assert_series_equal(result, df['a']) + + tm.assert_series_equal( + df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) + tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) + tm.assert_frame_equal( + df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']]) + + # GH 9603 + df = DataFrame({'a': [1, 0, 0, 0]}) + c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list('abcd'))) + result = df.groupby(c).apply(len) + + exp_index = CategoricalIndex( + c.values.categories, ordered=c.values.ordered) + expected = Series([1, 0, 0, 0], index=exp_index) + expected.index.name = 'a' + tm.assert_series_equal(result, expected) + + def 
test_groupby_sort(self): + + # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby + # This should result in a properly sorted Series so that the plot + # has a sorted x axis + # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') + + df = DataFrame({'value': np.random.randint(0, 10000, 100)}) + labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)] + cat_labels = Categorical(labels, labels) + + df = df.sort_values(by=['value'], ascending=True) + df['value_group'] = pd.cut(df.value, range(0, 10500, 500), + right=False, labels=cat_labels) + + res = df.groupby(['value_group'])['value_group'].count() + exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] + exp.index = CategoricalIndex(exp.index, name=exp.index.name) + tm.assert_series_equal(res, exp) + def test_level_groupby_get_group(self): # GH15155 df = DataFrame(data=np.arange(2, 22, 2), @@ -526,3 +662,53 @@ def test_groupby_categorical_two_columns(self): "C3": [nan, nan, nan, nan, 10, 100, nan, nan, nan, nan, 200, 34]}, index=idx) tm.assert_frame_equal(res, exp) + + def test_empty_sum(self): + # https://github.com/pandas-dev/pandas/issues/18678 + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=['a', 'b', 'c']), + 'B': [1, 2, 1]}) + expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') + + # 0 by default + result = df.groupby("A").B.sum() + expected = pd.Series([3, 1, 0], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df.groupby("A").B.sum(min_count=0) + expected = pd.Series([3, 1, 0], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df.groupby("A").B.sum(min_count=1) + expected = pd.Series([3, 1, np.nan], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count>1 + result = df.groupby("A").B.sum(min_count=2) + expected = pd.Series([3, np.nan, np.nan], expected_idx, name='B') + 
tm.assert_series_equal(result, expected) + + def test_empty_prod(self): + # https://github.com/pandas-dev/pandas/issues/18678 + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=['a', 'b', 'c']), + 'B': [1, 2, 1]}) + + expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') + + # 1 by default + result = df.groupby("A").B.prod() + expected = pd.Series([2, 1, 1], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df.groupby("A").B.prod(min_count=0) + expected = pd.Series([2, 1, 1], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df.groupby("A").B.prod(min_count=1) + expected = pd.Series([2, 1, np.nan], expected_idx, name='B') + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py index 485241d593d4f..787d99086873e 100644 --- a/pandas/tests/groupby/test_counting.py +++ b/pandas/tests/groupby/test_counting.py @@ -2,9 +2,11 @@ from __future__ import print_function import numpy as np +import pytest -from pandas import (DataFrame, Series, MultiIndex) -from pandas.util.testing import assert_series_equal +from pandas import (DataFrame, Series, MultiIndex, Timestamp, Timedelta, + Period) +from pandas.util.testing import (assert_series_equal, assert_frame_equal) from pandas.compat import (range, product as cart_product) @@ -195,3 +197,18 @@ def test_ngroup_respects_groupby_order(self): g.ngroup()) assert_series_equal(Series(df['group_index'].values), g.cumcount()) + + @pytest.mark.parametrize('datetimelike', [ + [Timestamp('2016-05-%02d 20:09:25+00:00' % i) for i in range(1, 4)], + [Timestamp('2016-05-%02d 20:09:25' % i) for i in range(1, 4)], + [Timedelta(x, unit="h") for x in range(1, 4)], + [Period(freq="2W", year=2017, month=x) for x in range(1, 4)]]) + def test_count_with_datetimelike(self, datetimelike): + # test for #13393, where DataframeGroupBy.count() fails + # when 
counting a datetimelike column. + + df = DataFrame({'x': ['a', 'a', 'b'], 'y': datetimelike}) + res = df.groupby('x').count() + expected = DataFrame({'y': [2, 1]}, index=['a', 'b']) + expected.index.name = "x" + assert_frame_equal(expected, res) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 9d25117fbd954..7a5581c897231 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -10,7 +10,7 @@ from pandas import (date_range, bdate_range, Timestamp, Index, MultiIndex, DataFrame, Series, - concat, Panel, DatetimeIndex) + concat, Panel, DatetimeIndex, CategoricalIndex) from pandas.errors import UnsupportedFunctionCall, PerformanceWarning from pandas.util.testing import (assert_panel_equal, assert_frame_equal, assert_series_equal, assert_almost_equal, @@ -28,6 +28,15 @@ from .common import MixIn +class TestGrouper(object): + + def test_repr(self): + # GH18203 + result = repr(pd.Grouper(key='A', level='B')) + expected = "Grouper(key='A', level='B', axis=0, sort=False)" + assert result == expected + + class TestGroupBy(MixIn): def test_basic(self): @@ -253,6 +262,29 @@ def test_grouper_column_and_index(self): expected = df_single.reset_index().groupby(['inner', 'B']).mean() assert_frame_equal(result, expected) + def test_groupby_categorical_index_and_columns(self): + # GH18432 + columns = ['A', 'B', 'A', 'B'] + categories = ['B', 'A'] + data = np.ones((5, 4), int) + cat_columns = CategoricalIndex(columns, + categories=categories, + ordered=True) + df = DataFrame(data=data, columns=cat_columns) + result = df.groupby(axis=1, level=0).sum() + expected_data = 2 * np.ones((5, 2), int) + expected_columns = CategoricalIndex(categories, + categories=categories, + ordered=True) + expected = DataFrame(data=expected_data, columns=expected_columns) + assert_frame_equal(result, expected) + + # test transposed version + df = DataFrame(data.T, index=cat_columns) + result = df.groupby(axis=0, 
level=0).sum() + expected = DataFrame(data=expected_data.T, index=expected_columns) + assert_frame_equal(result, expected) + def test_grouper_getting_correct_binner(self): # GH 10063 @@ -3818,7 +3850,7 @@ def h(df, arg3): # Assert the results here index = pd.Index(['A', 'B', 'C'], name='group') - expected = pd.Series([-79.5160891089, -78.4839108911, None], + expected = pd.Series([-79.5160891089, -78.4839108911, -80], index=index) assert_series_equal(expected, result) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index c8503b16a0e16..d359bfa5351a9 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -41,12 +41,11 @@ def test_groupby_with_timegrouper(self): df = df.set_index(['Date']) expected = DataFrame( - {'Quantity': np.nan}, + {'Quantity': 0}, index=date_range('20130901 13:00:00', '20131205 13:00:00', freq='5D', name='Date', closed='left')) - expected.iloc[[0, 6, 18], 0] = np.array( - [24., 6., 9.], dtype='float64') + expected.iloc[[0, 6, 18], 0] = np.array([24, 6, 9], dtype='int64') result1 = df.resample('5D') .sum() assert_frame_equal(result1, expected) @@ -245,6 +244,8 @@ def test_timegrouper_with_reg_groups(self): result = df.groupby([pd.Grouper(freq='1M', key='Date')]).sum() assert_frame_equal(result, expected) + @pytest.mark.parametrize('freq', ['D', 'M', 'A', 'Q-APR']) + def test_timegrouper_with_reg_groups_freq(self, freq): # GH 6764 multiple grouping with/without sort df = DataFrame({ 'date': pd.to_datetime([ @@ -258,20 +259,24 @@ def test_timegrouper_with_reg_groups(self): 'cost1': [12, 15, 10, 24, 39, 1, 0, 90, 45, 34, 1, 12] }).set_index('date') - for freq in ['D', 'M', 'A', 'Q-APR']: - expected = df.groupby('user_id')[ - 'whole_cost'].resample( - freq).sum().dropna().reorder_levels( - ['date', 'user_id']).sort_index().astype('int64') - expected.name = 'whole_cost' - - result1 = df.sort_index().groupby([pd.Grouper(freq=freq), - 
'user_id'])['whole_cost'].sum() - assert_series_equal(result1, expected) - - result2 = df.groupby([pd.Grouper(freq=freq), 'user_id'])[ - 'whole_cost'].sum() - assert_series_equal(result2, expected) + expected = ( + df.groupby('user_id')['whole_cost'] + .resample(freq) + .sum(min_count=1) # XXX + .dropna() + .reorder_levels(['date', 'user_id']) + .sort_index() + .astype('int64') + ) + expected.name = 'whole_cost' + + result1 = df.sort_index().groupby([pd.Grouper(freq=freq), + 'user_id'])['whole_cost'].sum() + assert_series_equal(result1, expected) + + result2 = df.groupby([pd.Grouper(freq=freq), 'user_id'])[ + 'whole_cost'].sum() + assert_series_equal(result2, expected) def test_timegrouper_get_group(self): # GH 6914 diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 456e5a9bd6439..3a57337efea6f 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -996,3 +996,16 @@ def test_searchsorted_monotonic(self, indices): # non-monotonic should raise. 
with pytest.raises(ValueError): indices._searchsorted_monotonic(value, side='left') + + def test_putmask_with_wrong_mask(self): + # GH18368 + index = self.create_index() + + with pytest.raises(ValueError): + index.putmask(np.ones(len(index) + 1, np.bool), 1) + + with pytest.raises(ValueError): + index.putmask(np.ones(len(index) - 1, np.bool), 1) + + with pytest.raises(ValueError): + index.putmask('foo', 1) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 3b40ef092f364..1349f2f761a2f 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -20,11 +20,6 @@ START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) -def eq_gen_range(kwargs, expected): - rng = generate_range(**kwargs) - assert (np.array_equal(list(rng), expected)) - - class TestDateRanges(TestData): def test_date_range_gen_error(self): @@ -201,20 +196,23 @@ def test_generate_cday(self): assert rng1 == rng2 def test_1(self): - eq_gen_range(dict(start=datetime(2009, 3, 25), periods=2), - [datetime(2009, 3, 25), datetime(2009, 3, 26)]) + rng = list(generate_range(start=datetime(2009, 3, 25), periods=2)) + expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)] + assert rng == expected def test_2(self): - eq_gen_range(dict(start=datetime(2008, 1, 1), - end=datetime(2008, 1, 3)), - [datetime(2008, 1, 1), - datetime(2008, 1, 2), - datetime(2008, 1, 3)]) + rng = list(generate_range(start=datetime(2008, 1, 1), + end=datetime(2008, 1, 3))) + expected = [datetime(2008, 1, 1), + datetime(2008, 1, 2), + datetime(2008, 1, 3)] + assert rng == expected def test_3(self): - eq_gen_range(dict(start=datetime(2008, 1, 5), - end=datetime(2008, 1, 6)), - []) + rng = list(generate_range(start=datetime(2008, 1, 5), + end=datetime(2008, 1, 6))) + expected = [] + assert rng == expected def test_precision_finer_than_offset(self): # GH 9907 @@ -236,6 +234,22 @@ def 
test_precision_finer_than_offset(self): tm.assert_index_equal(result1, expected1) tm.assert_index_equal(result2, expected2) + dt1, dt2 = '2017-01-01', '2017-01-01' + tz1, tz2 = 'US/Eastern', 'Europe/London' + + @pytest.mark.parametrize("start,end", [ + (pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2)), + (pd.Timestamp(dt1), pd.Timestamp(dt2, tz=tz2)), + (pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2, tz=tz2)), + (pd.Timestamp(dt1, tz=tz2), pd.Timestamp(dt2, tz=tz1)) + ]) + def test_mismatching_tz_raises_err(self, start, end): + # issue 18488 + with pytest.raises(TypeError): + pd.date_range(start, end) + with pytest.raises(TypeError): + pd.DatetimeIndex(start, end, freq=BDay()) + class TestBusinessDateRange(object): diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 8d9ac59cf9883..20a9916ad6bc4 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -211,6 +211,40 @@ def test_ufunc_coercions(self): tm.assert_index_equal(result, exp) assert result.freq == 'D' + def test_datetimeindex_sub_timestamp_overflow(self): + dtimax = pd.to_datetime(['now', pd.Timestamp.max]) + dtimin = pd.to_datetime(['now', pd.Timestamp.min]) + + tsneg = Timestamp('1950-01-01') + ts_neg_variants = [tsneg, + tsneg.to_pydatetime(), + tsneg.to_datetime64().astype('datetime64[ns]'), + tsneg.to_datetime64().astype('datetime64[D]')] + + tspos = Timestamp('1980-01-01') + ts_pos_variants = [tspos, + tspos.to_pydatetime(), + tspos.to_datetime64().astype('datetime64[ns]'), + tspos.to_datetime64().astype('datetime64[D]')] + + for variant in ts_neg_variants: + with pytest.raises(OverflowError): + dtimax - variant + + expected = pd.Timestamp.max.value - tspos.value + for variant in ts_pos_variants: + res = dtimax - variant + assert res[1].value == expected + + expected = pd.Timestamp.min.value - tsneg.value + for variant in ts_neg_variants: + res = dtimin - variant + assert 
res[1].value == expected + + for variant in ts_pos_variants: + with pytest.raises(OverflowError): + dtimin - variant + def test_week_of_month_frequency(self): # GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise d1 = date(2002, 9, 1) diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index e7d03aa193cbd..04c180350fb72 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -2,9 +2,10 @@ import pytest -from datetime import datetime +from datetime import datetime, date import numpy as np import pandas as pd +import operator as op from pandas import (DatetimeIndex, Series, DataFrame, date_range, Index, Timedelta, Timestamp) @@ -268,3 +269,21 @@ def test_loc_datetime_length_one(self): result = df.loc['2016-10-01T00:00:00':] tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize('datetimelike', [ + Timestamp('20130101'), datetime(2013, 1, 1), + date(2013, 1, 1), np.datetime64('2013-01-01T00:00', 'ns')]) + @pytest.mark.parametrize('op,expected', [ + (op.lt, [True, False, False, False]), + (op.le, [True, True, False, False]), + (op.eq, [False, True, False, False]), + (op.gt, [False, False, False, True])]) + def test_selection_by_datetimelike(self, datetimelike, op, expected): + # GH issue #17965, test for ability to compare datetime64[ns] columns + # to datetimelike + df = DataFrame({'A': [pd.Timestamp('20120101'), + pd.Timestamp('20130101'), + np.nan, pd.Timestamp('20130103')]}) + result = op(df.A, datetimelike) + expected = Series(expected, name='A') + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 330ec9f357655..c7944c078d8c4 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -960,6 +960,7 @@ def 
test_guess_datetime_format_nopadding(self): for dt_string, dt_format in dt_string_to_format: assert tools._guess_datetime_format(dt_string) == dt_format + @pytest.mark.xfail(reason="GH18141 - dateutil > 2.6.1 broken") def test_guess_datetime_format_for_array(self): tm._skip_if_not_us_locale() expected_format = '%Y-%m-%d %H:%M:%S.%f' diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index d8ec23b9c7e0e..5e40e06d57413 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -4,6 +4,7 @@ import pandas.util.testing as tm from pandas.core.indexes.api import Index, CategoricalIndex +from pandas.core.dtypes.dtypes import CategoricalDtype from .common import Base from pandas.compat import range, PY3 @@ -95,6 +96,11 @@ def test_construction(self): 1, -1, 0], dtype='int8')) assert result.ordered + result = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True) + expected = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True, + dtype='category') + tm.assert_index_equal(result, expected, exact=True) + # turn me to an Index result = Index(np.array(ci)) assert isinstance(result, Index) @@ -125,6 +131,25 @@ def test_construction_with_dtype(self): result = CategoricalIndex(idx, categories=idx, ordered=True) tm.assert_index_equal(result, expected, exact=True) + def test_construction_with_categorical_dtype(self): + # construction with CategoricalDtype + # GH18109 + data, cats, ordered = 'a a b b'.split(), 'c b a'.split(), True + dtype = CategoricalDtype(categories=cats, ordered=ordered) + + result = pd.CategoricalIndex(data, dtype=dtype) + expected = pd.CategoricalIndex(data, categories=cats, + ordered=ordered) + tm.assert_index_equal(result, expected, exact=True) + + # error to combine categories or ordered and dtype keywords args + with pytest.raises(ValueError, match="Cannot specify both `dtype` and " + "`categories` or `ordered`."): + pd.CategoricalIndex(data, categories=cats, 
dtype=dtype) + with pytest.raises(ValueError, match="Cannot specify both `dtype` and " + "`categories` or `ordered`."): + pd.CategoricalIndex(data, ordered=ordered, dtype=dtype) + def test_create_categorical(self): # https://github.com/pandas-dev/pandas/pull/17513 # The public CI constructor doesn't hit this code path with diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index b55bab3a210cc..399d88309072e 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -6,6 +6,7 @@ from pandas import (Interval, IntervalIndex, Index, isna, interval_range, Timestamp, Timedelta, compat, date_range, timedelta_range, DateOffset) +from pandas.compat import zip from pandas.tseries.offsets import Day from pandas._libs.interval import IntervalTree from pandas.tests.indexes.common import Base @@ -13,6 +14,11 @@ import pandas as pd +@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither']) +def closed(request): + return request.param + + class TestIntervalIndex(Base): _holder = IntervalIndex @@ -22,34 +28,63 @@ def setup_method(self, method): [(0, 1), np.nan, (1, 2)]) self.indices = dict(intervalIndex=tm.makeIntervalIndex(10)) - def create_index(self): - return IntervalIndex.from_breaks(np.arange(10)) + def create_index(self, closed='right'): + return IntervalIndex.from_breaks(np.arange(3), closed=closed) - def test_constructors(self): - expected = self.index - actual = IntervalIndex.from_breaks(np.arange(3), closed='right') - assert expected.equals(actual) + def create_index_with_nan(self, closed='right'): + return IntervalIndex.from_tuples( + [(0, 1), np.nan, (1, 2)], closed=closed) - alternate = IntervalIndex.from_breaks(np.arange(3), closed='left') - assert not expected.equals(alternate) + @pytest.mark.parametrize('name', [None, 'foo']) + def test_constructors(self, closed, name): + left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4]) + ivs = [Interval(l, r, closed=closed) for 
l, r in zip(left, right)] + expected = IntervalIndex._simple_new( + left=left, right=right, closed=closed, name=name) - actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)]) - assert expected.equals(actual) + result = IntervalIndex(ivs, name=name) + tm.assert_index_equal(result, expected) - actual = IntervalIndex([Interval(0, 1), Interval(1, 2)]) - assert expected.equals(actual) + result = IntervalIndex.from_intervals(ivs, name=name) + tm.assert_index_equal(result, expected) - actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1, - closed='right') - assert expected.equals(actual) + result = IntervalIndex.from_breaks( + np.arange(5), closed=closed, name=name) + tm.assert_index_equal(result, expected) - actual = Index([Interval(0, 1), Interval(1, 2)]) - assert isinstance(actual, IntervalIndex) - assert expected.equals(actual) + result = IntervalIndex.from_arrays( + left.values, right.values, closed=closed, name=name) + tm.assert_index_equal(result, expected) - actual = Index(expected) - assert isinstance(actual, IntervalIndex) - assert expected.equals(actual) + result = IntervalIndex.from_tuples( + zip(left, right), closed=closed, name=name) + tm.assert_index_equal(result, expected) + + result = Index(ivs, name=name) + assert isinstance(result, IntervalIndex) + tm.assert_index_equal(result, expected) + + # idempotent + tm.assert_index_equal(Index(expected), expected) + tm.assert_index_equal(IntervalIndex(expected), expected) + + result = IntervalIndex.from_intervals( + expected.values, name=expected.name) + tm.assert_index_equal(result, expected) + + left, right = expected.left, expected.right + result = IntervalIndex.from_arrays( + left, right, closed=expected.closed, name=expected.name) + tm.assert_index_equal(result, expected) + + result = IntervalIndex.from_tuples( + expected.to_tuples(), closed=expected.closed, name=expected.name) + tm.assert_index_equal(result, expected) + + breaks = expected.left.tolist() + [expected.right[-1]] + 
result = IntervalIndex.from_breaks( + breaks, closed=expected.closed, name=expected.name) + tm.assert_index_equal(result, expected) def test_constructors_other(self): @@ -66,43 +101,57 @@ def test_constructors_other(self): def test_constructors_errors(self): # scalar - with pytest.raises(TypeError): + msg = ('IntervalIndex(...) must be called with a collection of ' + 'some kind, 5 was passed') + with pytest.raises(TypeError, message=msg): IntervalIndex(5) # not an interval - with pytest.raises(TypeError): + msg = "type <class 'numpy.int32'> with value 0 is not an interval" + with pytest.raises(TypeError, message=msg): IntervalIndex([0, 1]) - with pytest.raises(TypeError): + with pytest.raises(TypeError, message=msg): IntervalIndex.from_intervals([0, 1]) # invalid closed - with pytest.raises(ValueError): + msg = "invalid options for 'closed': invalid" + with pytest.raises(ValueError, message=msg): IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid') # mismatched closed - with pytest.raises(ValueError): + msg = 'intervals must all be closed on the same side' + with pytest.raises(ValueError, message=msg): IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2, closed='left')]) - with pytest.raises(ValueError): + with pytest.raises(ValueError, message=msg): IntervalIndex.from_arrays([0, 10], [3, 5]) - with pytest.raises(ValueError): + with pytest.raises(ValueError, message=msg): Index([Interval(0, 1), Interval(2, 3, closed='left')]) # no point in nesting periods in an IntervalIndex - with pytest.raises(ValueError): + msg = 'Period dtypes are not supported, use a PeriodIndex instead' + with pytest.raises(ValueError, message=msg): IntervalIndex.from_breaks( pd.period_range('2000-01-01', periods=3)) - def test_constructors_datetimelike(self): + # decreasing breaks/arrays + msg = 'left side of interval must be <= right side' + with pytest.raises(ValueError, message=msg): + IntervalIndex.from_breaks(range(10, -1, -1)) + + with pytest.raises(ValueError, 
message=msg): + IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1)) + + def test_constructors_datetimelike(self, closed): # DTI / TDI for idx in [pd.date_range('20130101', periods=5), pd.timedelta_range('1 day', periods=5)]: - result = IntervalIndex.from_breaks(idx) - expected = IntervalIndex.from_breaks(idx.values) + result = IntervalIndex.from_breaks(idx, closed=closed) + expected = IntervalIndex.from_breaks(idx.values, closed=closed) tm.assert_index_equal(result, expected) expected_scalar_type = type(idx[0]) @@ -117,8 +166,8 @@ def f(): IntervalIndex.from_intervals([0.997, 4.0]) pytest.raises(TypeError, f) - def test_properties(self): - index = self.index + def test_properties(self, closed): + index = self.create_index(closed=closed) assert len(index) == 2 assert index.size == 2 assert index.shape == (2, ) @@ -127,14 +176,15 @@ def test_properties(self): tm.assert_index_equal(index.right, Index([1, 2])) tm.assert_index_equal(index.mid, Index([0.5, 1.5])) - assert index.closed == 'right' + assert index.closed == closed - expected = np.array([Interval(0, 1), Interval(1, 2)], dtype=object) + expected = np.array([Interval(0, 1, closed=closed), + Interval(1, 2, closed=closed)], dtype=object) tm.assert_numpy_array_equal(np.asarray(index), expected) tm.assert_numpy_array_equal(index.values, expected) # with nans - index = self.index_with_nan + index = self.create_index_with_nan(closed=closed) assert len(index) == 3 assert index.size == 3 assert index.shape == (3, ) @@ -143,41 +193,43 @@ def test_properties(self): tm.assert_index_equal(index.right, Index([1, np.nan, 2])) tm.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5])) - assert index.closed == 'right' + assert index.closed == closed - expected = np.array([Interval(0, 1), np.nan, - Interval(1, 2)], dtype=object) + expected = np.array([Interval(0, 1, closed=closed), np.nan, + Interval(1, 2, closed=closed)], dtype=object) tm.assert_numpy_array_equal(np.asarray(index), expected) 
tm.assert_numpy_array_equal(index.values, expected) - def test_with_nans(self): - index = self.index + def test_with_nans(self, closed): + index = self.create_index(closed=closed) assert not index.hasnans tm.assert_numpy_array_equal(index.isna(), np.array([False, False])) tm.assert_numpy_array_equal(index.notna(), np.array([True, True])) - index = self.index_with_nan + index = self.create_index_with_nan(closed=closed) assert index.hasnans tm.assert_numpy_array_equal(index.notna(), np.array([True, False, True])) tm.assert_numpy_array_equal(index.isna(), np.array([False, True, False])) - def test_copy(self): - actual = self.index.copy() - assert actual.equals(self.index) + def test_copy(self, closed): + expected = IntervalIndex.from_breaks(np.arange(5), closed=closed) + + result = expected.copy() + assert result.equals(expected) - actual = self.index.copy(deep=True) - assert actual.equals(self.index) - assert actual.left is not self.index.left + result = expected.copy(deep=True) + assert result.equals(expected) + assert result.left is not expected.left - def test_ensure_copied_data(self): + def test_ensure_copied_data(self, closed): # exercise the copy flag in the constructor # not copying - index = self.index + index = self.create_index(closed=closed) result = IntervalIndex(index, copy=False) tm.assert_numpy_array_equal(index.left.values, result.left.values, check_same='same') @@ -191,23 +243,34 @@ def test_ensure_copied_data(self): tm.assert_numpy_array_equal(index.right.values, result.right.values, check_same='copy') - def test_equals(self): + def test_equals(self, closed): + expected = IntervalIndex.from_breaks(np.arange(5), closed=closed) + assert expected.equals(expected) + assert expected.equals(expected.copy()) - idx = self.index - assert idx.equals(idx) - assert idx.equals(idx.copy()) + assert not expected.equals(expected.astype(object)) + assert not expected.equals(np.array(expected)) + assert not expected.equals(list(expected)) - assert not 
idx.equals(idx.astype(object)) - assert not idx.equals(np.array(idx)) - assert not idx.equals(list(idx)) + assert not expected.equals([1, 2]) + assert not expected.equals(np.array([1, 2])) + assert not expected.equals(pd.date_range('20130101', periods=2)) - assert not idx.equals([1, 2]) - assert not idx.equals(np.array([1, 2])) - assert not idx.equals(pd.date_range('20130101', periods=2)) + expected_name1 = IntervalIndex.from_breaks( + np.arange(5), closed=closed, name='foo') + expected_name2 = IntervalIndex.from_breaks( + np.arange(5), closed=closed, name='bar') + assert expected.equals(expected_name1) + assert expected_name1.equals(expected_name2) - def test_astype(self): + for other_closed in {'left', 'right', 'both', 'neither'} - {closed}: + expected_other_closed = IntervalIndex.from_breaks( + np.arange(5), closed=other_closed) + assert not expected.equals(expected_other_closed) - idx = self.index + def test_astype(self, closed): + + idx = self.create_index(closed=closed) for dtype in [np.int64, np.float64, 'datetime64[ns]', 'datetime64[ns, US/Eastern]', 'timedelta64', @@ -227,24 +290,24 @@ def test_astype(self): expected = pd.Categorical(idx, ordered=True) tm.assert_categorical_equal(result, expected) - def test_where(self): - expected = self.index - result = self.index.where(self.index.notna()) + def test_where(self, closed): + expected = self.create_index(closed=closed) + result = expected.where(expected.notna()) tm.assert_index_equal(result, expected) - idx = IntervalIndex.from_breaks([1, 2]) + idx = IntervalIndex.from_breaks([1, 2], closed=closed) result = idx.where([True, False]) expected = IntervalIndex.from_intervals( - [Interval(1.0, 2.0, closed='right'), np.nan]) + [Interval(1.0, 2.0, closed=closed), np.nan]) tm.assert_index_equal(result, expected) def test_where_array_like(self): pass - def test_delete(self): - expected = IntervalIndex.from_breaks([1, 2]) - actual = self.index.delete(0) - assert expected.equals(actual) + def test_delete(self, 
closed): + expected = IntervalIndex.from_breaks([1, 2], closed=closed) + result = self.create_index(closed=closed).delete(0) + tm.assert_index_equal(result, expected) def test_insert(self): expected = IntervalIndex.from_breaks(range(4)) @@ -255,113 +318,128 @@ def test_insert(self): pytest.raises(ValueError, self.index.insert, 0, Interval(2, 3, closed='left')) - def test_take(self): - actual = self.index.take([0, 1]) - assert self.index.equals(actual) + def test_take(self, closed): + index = self.create_index(closed=closed) - expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2]) - actual = self.index.take([0, 0, 1]) - assert expected.equals(actual) + actual = index.take([0, 1]) + tm.assert_index_equal(actual, index) + + expected = IntervalIndex.from_arrays( + [0, 0, 1], [1, 1, 2], closed=closed) + actual = index.take([0, 0, 1]) + tm.assert_index_equal(actual, expected) - def test_unique(self): + def test_unique(self, closed): # unique non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (2, 3), (4, 5)], closed=closed) assert idx.is_unique # unique overlapping - distinct endpoints - idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)]) + idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed) assert idx.is_unique # unique overlapping - shared endpoints - idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)]) + idx = pd.IntervalIndex.from_tuples( + [(1, 2), (1, 3), (2, 3)], closed=closed) assert idx.is_unique # unique nested - idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)]) + idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed) assert idx.is_unique # duplicate - idx = IntervalIndex.from_tuples([(0, 1), (0, 1), (2, 3)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (0, 1), (2, 3)], closed=closed) assert not idx.is_unique # unique mixed - idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')]) + idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], 
closed=closed) assert idx.is_unique # duplicate mixed - idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b'), (0, 1)]) + idx = IntervalIndex.from_tuples( + [(0, 1), ('a', 'b'), (0, 1)], closed=closed) assert not idx.is_unique # empty - idx = IntervalIndex([]) + idx = IntervalIndex([], closed=closed) assert idx.is_unique - def test_monotonic(self): + def test_monotonic(self, closed): # increasing non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (2, 3), (4, 5)], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # decreasing non-overlapping - idx = IntervalIndex.from_tuples([(4, 5), (2, 3), (1, 2)]) + idx = IntervalIndex.from_tuples( + [(4, 5), (2, 3), (1, 2)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert idx._is_strictly_monotonic_decreasing # unordered non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (4, 5), (2, 3)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (4, 5), (2, 3)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # increasing overlapping - idx = IntervalIndex.from_tuples([(0, 2), (0.5, 2.5), (1, 3)]) + idx = IntervalIndex.from_tuples( + [(0, 2), (0.5, 2.5), (1, 3)], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # decreasing overlapping - idx = IntervalIndex.from_tuples([(1, 3), (0.5, 2.5), (0, 2)]) + idx = IntervalIndex.from_tuples( + [(1, 3), (0.5, 2.5), (0, 2)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert 
idx._is_strictly_monotonic_decreasing # unordered overlapping - idx = IntervalIndex.from_tuples([(0.5, 2.5), (0, 2), (1, 3)]) + idx = IntervalIndex.from_tuples( + [(0.5, 2.5), (0, 2), (1, 3)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # increasing overlapping shared endpoints - idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)]) + idx = pd.IntervalIndex.from_tuples( + [(1, 2), (1, 3), (2, 3)], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # decreasing overlapping shared endpoints - idx = pd.IntervalIndex.from_tuples([(2, 3), (1, 3), (1, 2)]) + idx = pd.IntervalIndex.from_tuples( + [(2, 3), (1, 3), (1, 2)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert idx._is_strictly_monotonic_decreasing # stationary - idx = IntervalIndex.from_tuples([(0, 1), (0, 1)]) + idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed) assert idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # empty - idx = IntervalIndex([]) + idx = IntervalIndex([], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing @@ -395,24 +473,24 @@ def test_repr_max_seq_item_setting(self): def test_repr_roundtrip(self): super(TestIntervalIndex, self).test_repr_roundtrip() - def test_get_item(self): + def test_get_item(self, closed): i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), - closed='right') - assert i[0] == Interval(0.0, 1.0) - assert i[1] == Interval(1.0, 2.0) + closed=closed) + assert i[0] == Interval(0.0, 1.0, closed=closed) + assert i[1] == Interval(1.0, 2.0, 
closed=closed) assert isna(i[2]) result = i[0:1] - expected = IntervalIndex.from_arrays((0.,), (1.,), closed='right') + expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed) tm.assert_index_equal(result, expected) result = i[0:2] - expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed='right') + expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed) tm.assert_index_equal(result, expected) result = i[1:3] expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan), - closed='right') + closed=closed) tm.assert_index_equal(result, expected) def test_get_loc_value(self): @@ -581,20 +659,22 @@ def testcontains(self): assert not i.contains(20) assert not i.contains(-20) - def test_dropna(self): + def test_dropna(self, closed): - expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)]) + expected = IntervalIndex.from_tuples( + [(0.0, 1.0), (1.0, 2.0)], closed=closed) - ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan]) + ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed) result = ii.dropna() tm.assert_index_equal(result, expected) - ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan]) + ii = IntervalIndex.from_arrays( + [0, 1, np.nan], [1, 2, np.nan], closed=closed) result = ii.dropna() tm.assert_index_equal(result, expected) - def test_non_contiguous(self): - index = IntervalIndex.from_tuples([(0, 1), (2, 3)]) + def test_non_contiguous(self, closed): + index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed) target = [0.5, 1.5, 2.5] actual = index.get_indexer(target) expected = np.array([0, -1, 1], dtype='intp') @@ -602,31 +682,32 @@ def test_non_contiguous(self): assert 1.5 not in index - def test_union(self): - other = IntervalIndex.from_arrays([2], [3]) - expected = IntervalIndex.from_arrays(range(3), range(1, 4)) - actual = self.index.union(other) + def test_union(self, closed): + idx = self.create_index(closed=closed) + other = IntervalIndex.from_arrays([2], [3], 
closed=closed) + expected = IntervalIndex.from_arrays( + range(3), range(1, 4), closed=closed) + actual = idx.union(other) assert expected.equals(actual) - actual = other.union(self.index) + actual = other.union(idx) assert expected.equals(actual) - tm.assert_index_equal(self.index.union(self.index), self.index) - tm.assert_index_equal(self.index.union(self.index[:1]), - self.index) + tm.assert_index_equal(idx.union(idx), idx) + tm.assert_index_equal(idx.union(idx[:1]), idx) - def test_intersection(self): - other = IntervalIndex.from_breaks([1, 2, 3]) - expected = IntervalIndex.from_breaks([1, 2]) - actual = self.index.intersection(other) + def test_intersection(self, closed): + idx = self.create_index(closed=closed) + other = IntervalIndex.from_breaks([1, 2, 3], closed=closed) + expected = IntervalIndex.from_breaks([1, 2], closed=closed) + actual = idx.intersection(other) assert expected.equals(actual) - tm.assert_index_equal(self.index.intersection(self.index), - self.index) + tm.assert_index_equal(idx.intersection(idx), idx) - def test_difference(self): - tm.assert_index_equal(self.index.difference(self.index[:1]), - self.index[1:]) + def test_difference(self, closed): + idx = self.create_index(closed=closed) + tm.assert_index_equal(idx.difference(idx[:1]), idx[1:]) def test_symmetric_difference(self): result = self.index[:1].symmetric_difference(self.index[1:]) @@ -639,11 +720,12 @@ def test_set_operation_errors(self): other = IntervalIndex.from_breaks([0, 1, 2], closed='neither') pytest.raises(ValueError, self.index.union, other) - def test_isin(self): - actual = self.index.isin(self.index) + def test_isin(self, closed): + idx = self.create_index(closed=closed) + actual = idx.isin(idx) tm.assert_numpy_array_equal(np.array([True, True]), actual) - actual = self.index.isin(self.index[:1]) + actual = idx.isin(idx[:1]) tm.assert_numpy_array_equal(np.array([True, False]), actual) def test_comparison(self): @@ -702,25 +784,28 @@ def test_comparison(self): with 
pytest.raises(ValueError): self.index > np.arange(3) - def test_missing_values(self): - idx = pd.Index([np.nan, pd.Interval(0, 1), pd.Interval(1, 2)]) - idx2 = pd.IntervalIndex.from_arrays([np.nan, 0, 1], [np.nan, 1, 2]) + def test_missing_values(self, closed): + idx = Index([np.nan, Interval(0, 1, closed=closed), + Interval(1, 2, closed=closed)]) + idx2 = IntervalIndex.from_arrays( + [np.nan, 0, 1], [np.nan, 1, 2], closed=closed) assert idx.equals(idx2) with pytest.raises(ValueError): - IntervalIndex.from_arrays([np.nan, 0, 1], np.array([0, 1, 2])) + IntervalIndex.from_arrays( + [np.nan, 0, 1], np.array([0, 1, 2]), closed=closed) tm.assert_numpy_array_equal(isna(idx), np.array([True, False, False])) - def test_sort_values(self): - expected = IntervalIndex.from_breaks([1, 2, 3, 4]) - actual = IntervalIndex.from_tuples([(3, 4), (1, 2), - (2, 3)]).sort_values() + def test_sort_values(self, closed): + expected = IntervalIndex.from_breaks([1, 2, 3, 4], closed=closed) + actual = IntervalIndex.from_tuples( + [(3, 4), (1, 2), (2, 3)], closed=closed).sort_values() tm.assert_index_equal(expected, actual) # nan - idx = self.index_with_nan + idx = self.create_index_with_nan(closed=closed) mask = idx.isna() tm.assert_numpy_array_equal(mask, np.array([False, True, False])) @@ -733,84 +818,83 @@ def test_sort_values(self): tm.assert_numpy_array_equal(mask, np.array([True, False, False])) def test_datetime(self): - dates = pd.date_range('2000', periods=3) + dates = date_range('2000', periods=3) idx = IntervalIndex.from_breaks(dates) tm.assert_index_equal(idx.left, dates[:2]) tm.assert_index_equal(idx.right, dates[-2:]) - expected = pd.date_range('2000-01-01T12:00', periods=2) + expected = date_range('2000-01-01T12:00', periods=2) tm.assert_index_equal(idx.mid, expected) - assert pd.Timestamp('2000-01-01T12') not in idx - assert pd.Timestamp('2000-01-01T12') not in idx + assert Timestamp('2000-01-01T12') not in idx + assert Timestamp('2000-01-01T12') not in idx - target = 
pd.date_range('1999-12-31T12:00', periods=7, freq='12H') + target = date_range('1999-12-31T12:00', periods=7, freq='12H') actual = idx.get_indexer(target) expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp') tm.assert_numpy_array_equal(actual, expected) - def test_append(self): + def test_append(self, closed): - index1 = IntervalIndex.from_arrays([0, 1], [1, 2]) - index2 = IntervalIndex.from_arrays([1, 2], [2, 3]) + index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed) + index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed) result = index1.append(index2) - expected = IntervalIndex.from_arrays([0, 1, 1, 2], [1, 2, 2, 3]) + expected = IntervalIndex.from_arrays( + [0, 1, 1, 2], [1, 2, 2, 3], closed=closed) tm.assert_index_equal(result, expected) result = index1.append([index1, index2]) - expected = IntervalIndex.from_arrays([0, 1, 0, 1, 1, 2], - [1, 2, 1, 2, 2, 3]) + expected = IntervalIndex.from_arrays( + [0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed) tm.assert_index_equal(result, expected) - def f(): - index1.append(IntervalIndex.from_arrays([0, 1], [1, 2], - closed='both')) - - pytest.raises(ValueError, f) + msg = ('can only append two IntervalIndex objects that are closed ' + 'on the same side') + for other_closed in {'left', 'right', 'both', 'neither'} - {closed}: + index_other_closed = IntervalIndex.from_arrays( + [0, 1], [1, 2], closed=other_closed) + with tm.assert_raises_regex(ValueError, msg): + index1.append(index_other_closed) - def test_is_non_overlapping_monotonic(self): + def test_is_non_overlapping_monotonic(self, closed): # Should be True in all cases tpls = [(0, 1), (2, 3), (4, 5), (6, 7)] - for closed in ('left', 'right', 'neither', 'both'): - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert idx.is_non_overlapping_monotonic is True + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is True - idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) 
- assert idx.is_non_overlapping_monotonic is True + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) + assert idx.is_non_overlapping_monotonic is True # Should be False in all cases (overlapping) tpls = [(0, 2), (1, 3), (4, 5), (6, 7)] - for closed in ('left', 'right', 'neither', 'both'): - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert idx.is_non_overlapping_monotonic is False + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is False - idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) - assert idx.is_non_overlapping_monotonic is False + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) + assert idx.is_non_overlapping_monotonic is False # Should be False in all cases (non-monotonic) tpls = [(0, 1), (2, 3), (6, 7), (4, 5)] - for closed in ('left', 'right', 'neither', 'both'): - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert idx.is_non_overlapping_monotonic is False - - idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) - assert idx.is_non_overlapping_monotonic is False + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is False - # Should be False for closed='both', overwise True (GH16560) - idx = IntervalIndex.from_breaks(range(4), closed='both') + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) assert idx.is_non_overlapping_monotonic is False - for closed in ('left', 'right', 'neither'): + # Should be False for closed='both', otherwise True (GH16560) + if closed == 'both': + idx = IntervalIndex.from_breaks(range(4), closed=closed) + assert idx.is_non_overlapping_monotonic is False + else: + idx = IntervalIndex.from_breaks(range(4), closed=closed) assert idx.is_non_overlapping_monotonic is True class TestIntervalRange(object): - @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both']) def test_construction_from_numeric(self, closed): # combinations of 
start/end/periods without freq expected = IntervalIndex.from_breaks( @@ -848,7 +932,6 @@ def test_construction_from_numeric(self, closed): closed=closed) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both']) def test_construction_from_timestamp(self, closed): # combinations of start/end/periods without freq start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06') @@ -915,7 +998,6 @@ def test_construction_from_timestamp(self, closed): closed=closed) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both']) def test_construction_from_timedelta(self, closed): # combinations of start/end/periods without freq start, end = Timedelta('1 day'), Timedelta('6 days') diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 18bfc3d0efbee..c9c4029786c64 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -2980,3 +2980,13 @@ def test_nan_stays_float(self): assert pd.isna(df0.index.get_level_values(1)).all() # the following failed in 0.14.1 assert pd.isna(dfm.index.get_level_values(1)[:-1]).all() + + def test_million_record_attribute_error(self): + # GH 18165 + r = list(range(1000000)) + df = pd.DataFrame({'a': r, 'b': r}, + index=pd.MultiIndex.from_tuples([(x, x) for x in r])) + + with tm.assert_raises_regex(AttributeError, + "'Series' object has no attribute 'foo'"): + df['a'].foo() diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index f4f669ee1d087..3cf56dc5115c2 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -1282,3 +1282,23 @@ def test_add_overflow(self): result = (to_timedelta([pd.NaT, '5 days', '1 hours']) + to_timedelta(['7 seconds', pd.NaT, '4 hours'])) tm.assert_index_equal(result, exp) + + def test_timedeltaindex_add_timestamp_nat_masking(self): + # GH17991 
checking for overflow-masking with NaT + tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + + tsneg = Timestamp('1950-01-01') + ts_neg_variants = [tsneg, + tsneg.to_pydatetime(), + tsneg.to_datetime64().astype('datetime64[ns]'), + tsneg.to_datetime64().astype('datetime64[D]')] + + tspos = Timestamp('1980-01-01') + ts_pos_variants = [tspos, + tspos.to_pydatetime(), + tspos.to_datetime64().astype('datetime64[ns]'), + tspos.to_datetime64().astype('datetime64[D]')] + + for variant in ts_neg_variants + ts_pos_variants: + res = tdinat + variant + assert res[1] is pd.NaT diff --git a/pandas/tests/indexing/test_timedelta.py b/pandas/tests/indexing/test_timedelta.py index 32609362e49af..3ad3b771b2ab2 100644 --- a/pandas/tests/indexing/test_timedelta.py +++ b/pandas/tests/indexing/test_timedelta.py @@ -2,6 +2,7 @@ import pandas as pd from pandas.util import testing as tm +import numpy as np class TestTimedeltaIndexing(object): @@ -47,3 +48,23 @@ def test_string_indexing(self): expected = df.iloc[0] sliced = df.loc['0 days'] tm.assert_series_equal(sliced, expected) + + @pytest.mark.parametrize( + "value", + [None, pd.NaT, np.nan]) + def test_masked_setitem(self, value): + # issue (#18586) + series = pd.Series([0, 1, 2], dtype='timedelta64[ns]') + series[series == series[0]] = value + expected = pd.Series([pd.NaT, 1, 2], dtype='timedelta64[ns]') + tm.assert_series_equal(series, expected) + + @pytest.mark.parametrize( + "value", + [None, pd.NaT, np.nan]) + def test_listlike_setitem(self, value): + # issue (#18586) + series = pd.Series([0, 1, 2], dtype='timedelta64[ns]') + series.iloc[0] = value + expected = pd.Series([pd.NaT, 1, 2], dtype='timedelta64[ns]') + tm.assert_series_equal(series, expected) diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index c182db35c0c89..4e59779cb9b47 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1245,7 +1245,9 @@ class 
TestCanHoldElement(object): @pytest.mark.parametrize('value, dtype', [ (1, 'i8'), (1.0, 'f8'), + (2**63, 'f8'), (1j, 'complex128'), + (2**63, 'complex128'), (True, 'bool'), (np.timedelta64(20, 'ns'), '<m8[ns]'), (np.datetime64(20, 'ns'), '<M8[ns]'), diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py new file mode 100644 index 0000000000000..828d5d0ccd3c6 --- /dev/null +++ b/pandas/tests/io/conftest.py @@ -0,0 +1,74 @@ +import os + +import moto +import pytest +from pandas.io.parsers import read_table + +HERE = os.path.dirname(__file__) + + +@pytest.fixture(scope='module') +def tips_file(): + """Path to the tips dataset""" + return os.path.join(HERE, 'parser', 'data', 'tips.csv') + + +@pytest.fixture(scope='module') +def jsonl_file(): + """Path a JSONL dataset""" + return os.path.join(HERE, 'parser', 'data', 'items.jsonl') + + +@pytest.fixture(scope='module') +def salaries_table(): + """DataFrame with the salaries dataset""" + path = os.path.join(HERE, 'parser', 'data', 'salaries.csv') + return read_table(path) + + +@pytest.fixture(scope='module') +def s3_resource(tips_file, jsonl_file): + """Fixture for mocking S3 interaction. + + The primary bucket name is "pandas-test". The following datasets + are loaded. + + - tips.csv + - tips.csv.gz + - tips.csv.bz2 + - items.jsonl + + A private bucket "cant_get_it" is also created. The boto3 s3 resource + is yielded by the fixture. 
+ """ + pytest.importorskip('s3fs') + moto.mock_s3().start() + + test_s3_files = [ + ('tips.csv', tips_file), + ('tips.csv.gz', tips_file + '.gz'), + ('tips.csv.bz2', tips_file + '.bz2'), + ('items.jsonl', jsonl_file), + ] + + def add_tips_files(bucket_name): + for s3_key, file_name in test_s3_files: + with open(file_name, 'rb') as f: + conn.Bucket(bucket_name).put_object( + Key=s3_key, + Body=f) + + boto3 = pytest.importorskip('boto3') + # see gh-16135 + bucket = 'pandas-test' + + conn = boto3.resource("s3", region_name="us-east-1") + conn.create_bucket(Bucket=bucket) + add_tips_files(bucket) + + conn.create_bucket(Bucket='cant_get_it', ACL='private') + add_tips_files('cant_get_it') + + yield conn + + moto.mock_s3().stop() diff --git a/pandas/tests/io/data/stata13_dates.dta b/pandas/tests/io/data/stata13_dates.dta new file mode 100644 index 0000000000000..87b857559e501 Binary files /dev/null and b/pandas/tests/io/data/stata13_dates.dta differ diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index aa86d1d9231fb..5504ac942f688 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -91,6 +91,29 @@ def test_to_latex_format(self, frame): assert withindex_result == withindex_expected + def test_to_latex_empty(self): + df = DataFrame() + result = df.to_latex() + expected = r"""\begin{tabular}{l} +\toprule +Empty DataFrame +Columns: Index([], dtype='object') +Index: Index([], dtype='object') \\ +\bottomrule +\end{tabular} +""" + assert result == expected + + result = df.to_latex(longtable=True) + expected = r"""\begin{longtable}{l} +\toprule +Empty DataFrame +Columns: Index([], dtype='object') +Index: Index([], dtype='object') \\ +\end{longtable} +""" + assert result == expected + def test_to_latex_with_formatters(self): df = DataFrame({'int': [1, 2, 3], 'float': [1.0, 2.0, 3.0], @@ -221,6 +244,28 @@ def test_to_latex_multiindex(self): assert result == expected + def 
test_to_latex_multiindex_dupe_level(self): + # see gh-14484 + # + # If an index is repeated in subsequent rows, it should be + # replaced with a blank in the created table. This should + # ONLY happen if all higher order indices (to the left) are + # equal too. In this test, 'c' has to be printed both times + # because the higher order index 'A' != 'B'. + df = pd.DataFrame(index=pd.MultiIndex.from_tuples( + [('A', 'c'), ('B', 'c')]), columns=['col']) + result = df.to_latex() + expected = r"""\begin{tabular}{lll} +\toprule + & & col \\ +\midrule +A & c & NaN \\ +B & c & NaN \\ +\bottomrule +\end{tabular} +""" + assert result == expected + def test_to_latex_multicolumnrow(self): df = pd.DataFrame({ ('c1', 0): dict((x, x) for x in range(5)), @@ -355,7 +400,7 @@ def test_to_latex_longtable(self, frame): 1 & 2 & b2 \\ \end{longtable} """ - + open("expected.txt", "w").write(withindex_result) assert withindex_result == withindex_expected withoutindex_result = df.to_latex(index=False, longtable=True) @@ -365,7 +410,7 @@ def test_to_latex_longtable(self, frame): \midrule \endhead \midrule -\multicolumn{3}{r}{{Continued on next page}} \\ +\multicolumn{2}{r}{{Continued on next page}} \\ \midrule \endfoot @@ -378,6 +423,14 @@ def test_to_latex_longtable(self, frame): assert withoutindex_result == withoutindex_expected + df = DataFrame({'a': [1, 2]}) + with1column_result = df.to_latex(index=False, longtable=True) + assert "\multicolumn{1}" in with1column_result + + df = DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]}) + with3columns_result = df.to_latex(index=False, longtable=True) + assert "\multicolumn{3}" in with3columns_result + def test_to_latex_escape_special_chars(self): special_characters = ['&', '%', '$', '#', '_', '{', '}', '~', '^', '\\'] diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 49b765b18d623..1cceae32cd748 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ 
-173,6 +173,21 @@ def test_meta_name_conflict(self): for val in ['metafoo', 'metabar', 'foo', 'bar']: assert val in result + def test_meta_parameter_not_modified(self): + # GH 18610 + data = [{'foo': 'hello', + 'bar': 'there', + 'data': [{'foo': 'something', 'bar': 'else'}, + {'foo': 'something2', 'bar': 'else2'}]}] + + COLUMNS = ['foo', 'bar'] + result = json_normalize(data, 'data', meta=COLUMNS, + meta_prefix='meta') + + assert COLUMNS == ['foo', 'bar'] + for val in ['metafoo', 'metabar', 'foo', 'bar']: + assert val in result + def test_record_prefix(self, state_data): result = json_normalize(state_data[0], 'counties') expected = DataFrame(state_data[0]['counties']) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 6625446bea469..78e33f8966d1f 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -4,7 +4,6 @@ from pandas.compat import (range, lrange, StringIO, OrderedDict, is_platform_32bit) import os - import numpy as np from pandas import (Series, DataFrame, DatetimeIndex, Timestamp, read_json, compat) @@ -1030,6 +1029,70 @@ def test_tz_range_is_utc(self): df = DataFrame({'DT': dti}) assert dumps(df, iso_dates=True) == dfexp + def test_read_inline_jsonl(self): + # GH9180 + result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_read_s3_jsonl(self, s3_resource): + pytest.importorskip('s3fs') + # GH17200 + + result = read_json('s3n://pandas-test/items.jsonl', lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_read_local_jsonl(self): + # GH17200 + with ensure_clean('tmp_items.json') as path: + with open(path, 'w') as infile: + infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n') + result = read_json(path, lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + 
assert_frame_equal(result, expected) + + def test_read_jsonl_unicode_chars(self): + # GH15132: non-ascii unicode characters + # \u201d == RIGHT DOUBLE QUOTATION MARK + + # simulate file handle + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + json = StringIO(json) + result = read_json(json, lines=True) + expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], + columns=['a', 'b']) + assert_frame_equal(result, expected) + + # simulate string + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + result = read_json(json, lines=True) + expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], + columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_to_jsonl(self): + # GH9180 + df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + result = df.to_json(orient="records", lines=True) + expected = '{"a":1,"b":2}\n{"a":1,"b":2}' + assert result == expected + + df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b']) + result = df.to_json(orient="records", lines=True) + expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}' + assert result == expected + assert_frame_equal(pd.read_json(result, lines=True), df) + + # GH15096: escaped characters in columns and data + df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], + columns=["a\\", 'b']) + result = df.to_json(orient="records", lines=True) + expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n' + '{"a\\\\":"foo\\"","b":"bar"}') + assert result == expected + assert_frame_equal(pd.read_json(result, lines=True), df) + def test_latin_encoding(self): if compat.PY2: tm.assert_raises_regex( diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py index c68b2bf064d97..6d476e326213e 100644 --- a/pandas/tests/io/parser/c_parser_only.py +++ b/pandas/tests/io/parser/c_parser_only.py @@ -290,11 +290,11 @@ def test_empty_header_read(count): test_empty_header_read(count) def test_parse_trim_buffers(self): - # This test is part of a 
bugfix for issue #13703. It attmepts to + # This test is part of a bugfix for issue #13703. It attempts to # to stress the system memory allocator, to cause it to move the # stream buffer and either let the OS reclaim the region, or let # other memory requests of parser otherwise modify the contents - # of memory space, where it was formely located. + # of memory space, where it was formally located. # This test is designed to cause a `segfault` with unpatched # `tokenizer.c`. Sometimes the test fails on `segfault`, other # times it fails due to memory corruption, which causes the @@ -346,7 +346,7 @@ def test_parse_trim_buffers(self): # Generate the expected output: manually create the dataframe # by splitting by comma and repeating the `n_lines` times. - row = tuple(val_ if val_ else float("nan") + row = tuple(val_ if val_ else np.nan for val_ in record_.split(",")) expected = pd.DataFrame([row for _ in range(n_lines)], dtype=object, columns=None, index=None) @@ -359,6 +359,15 @@ def test_parse_trim_buffers(self): # Check for data corruption if there was no segfault tm.assert_frame_equal(result, expected) + # This extra test was added to replicate the fault in gh-5291. + # Force 'utf-8' encoding, so that `_string_convert` would take + # a different execution branch. 
+ chunks_ = self.read_csv(StringIO(csv_data), header=None, + dtype=object, chunksize=chunksize, + encoding='utf_8') + result = pd.concat(chunks_, axis=0, ignore_index=True) + tm.assert_frame_equal(result, expected) + def test_internal_null_byte(self): # see gh-14012 # diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index e85d3ad294655..6a996213b28bb 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -823,7 +823,7 @@ def test_parse_integers_above_fp_precision(self): 17007000002000192, 17007000002000194]}) - assert np.array_equal(result['Numbers'], expected['Numbers']) + tm.assert_series_equal(result['Numbers'], expected['Numbers']) def test_chunks_have_consistent_numerical_type(self): integers = [str(i) for i in range(499999)] diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py index 797c12139656d..84db9d14eee07 100644 --- a/pandas/tests/io/parser/compression.py +++ b/pandas/tests/io/parser/compression.py @@ -7,6 +7,7 @@ import pytest +import pandas as pd import pandas.util.testing as tm @@ -157,6 +158,19 @@ def test_read_csv_infer_compression(self): inputs[3].close() + def test_read_csv_compressed_utf16_example(self): + # GH18071 + path = tm.get_data_path('utf16_ex_small.zip') + + result = self.read_csv(path, encoding='utf-16', + compression='zip', sep='\t') + expected = pd.DataFrame({ + u'Country': [u'Venezuela', u'Venezuela'], + u'Twitter': [u'Hugo Chávez Frías', u'Henrique Capriles R.'] + }) + + tm.assert_frame_equal(result, expected) + def test_invalid_compression(self): msg = 'Unrecognized compression type: sfark' with tm.assert_raises_regex(ValueError, msg): diff --git a/pandas/tests/io/parser/data/items.jsonl b/pandas/tests/io/parser/data/items.jsonl new file mode 100644 index 0000000000000..f784d37befa82 --- /dev/null +++ b/pandas/tests/io/parser/data/items.jsonl @@ -0,0 +1,2 @@ +{"a": 1, "b": 2} +{"b":2, "a" :1} diff --git 
a/pandas/tests/io/parser/data/utf16_ex_small.zip b/pandas/tests/io/parser/data/utf16_ex_small.zip new file mode 100644 index 0000000000000..b0560c1b1f6c4 Binary files /dev/null and b/pandas/tests/io/parser/data/utf16_ex_small.zip differ diff --git a/pandas/tests/io/parser/dtypes.py b/pandas/tests/io/parser/dtypes.py index 7d3df6201a390..b91ce04673e29 100644 --- a/pandas/tests/io/parser/dtypes.py +++ b/pandas/tests/io/parser/dtypes.py @@ -114,6 +114,17 @@ def test_categorical_dtype(self): actual = self.read_csv(StringIO(data), dtype='category') tm.assert_frame_equal(actual, expected) + @pytest.mark.slow + def test_categorical_dtype_high_cardinality_numeric(self): + # GH 18186 + data = np.sort([str(i) for i in range(524289)]) + expected = DataFrame({'a': Categorical(data, ordered=True)}) + actual = self.read_csv(StringIO('a\n' + '\n'.join(data)), + dtype='category') + actual["a"] = actual["a"].cat.reorder_categories( + np.sort(actual.a.cat.categories), ordered=True) + tm.assert_frame_equal(actual, expected) + def test_categorical_dtype_encoding(self): # GH 10153 pth = tm.get_data_path('unicode_series.csv') diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py index 7fbf174e19eee..8dc599b42ddc7 100644 --- a/pandas/tests/io/parser/na_values.py +++ b/pandas/tests/io/parser/na_values.py @@ -312,3 +312,21 @@ def test_empty_na_values_no_default_with_index(self): out = self.read_csv(StringIO(data), keep_default_na=False, index_col=0) tm.assert_frame_equal(out, expected) + + def test_no_na_filter_on_index(self): + # see gh-5239 + data = "a,b,c\n1,,3\n4,5,6" + + # Don't parse NA-values in index when na_filter=False. + out = self.read_csv(StringIO(data), index_col=[1], na_filter=False) + + expected = DataFrame({"a": [1, 4], "c": [3, 6]}, + index=Index(["", "5"], name="b")) + tm.assert_frame_equal(out, expected) + + # Parse NA-values in index when na_filter=True. 
+ out = self.read_csv(StringIO(data), index_col=[1], na_filter=True) + + expected = DataFrame({"a": [1, 4], "c": [3, 6]}, + index=Index([np.nan, 5.0], name="b")) + tm.assert_frame_equal(out, expected) diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py index 90103e7bf26b0..4c0f67fa6876a 100644 --- a/pandas/tests/io/parser/parse_dates.py +++ b/pandas/tests/io/parser/parse_dates.py @@ -656,3 +656,21 @@ def test_parse_date_column_with_empty_string(self): [621, ' ']] expected = DataFrame(expected_data, columns=['case', 'opdate']) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("data,expected", [ + ("a\n135217135789158401\n1352171357E+5", + DataFrame({"a": [135217135789158401, + 135217135700000]}, dtype="float64")), + ("a\n99999999999\n123456789012345\n1234E+0", + DataFrame({"a": [99999999999, + 123456789012345, + 1234]}, dtype="float64")) + ]) + @pytest.mark.parametrize("parse_dates", [True, False]) + def test_parse_date_float(self, data, expected, parse_dates): + # see gh-2697 + # + # Date parsing should fail, so we leave the data untouched + # (i.e. float precision should remain unchanged). + result = self.read_csv(StringIO(data), parse_dates=parse_dates) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 27cc708889fa2..d00d3f31ce189 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -4,10 +4,7 @@ Tests parsers ability to read and parse non-local files and hence require a network connection to be read. 
""" -import os - import pytest -import moto import pandas.util.testing as tm from pandas import DataFrame @@ -15,51 +12,6 @@ from pandas.compat import BytesIO -@pytest.fixture(scope='module') -def tips_file(): - return os.path.join(tm.get_data_path(), 'tips.csv') - - -@pytest.fixture(scope='module') -def salaries_table(): - path = os.path.join(tm.get_data_path(), 'salaries.csv') - return read_table(path) - - -@pytest.fixture(scope='module') -def s3_resource(tips_file): - pytest.importorskip('s3fs') - moto.mock_s3().start() - - test_s3_files = [ - ('tips.csv', tips_file), - ('tips.csv.gz', tips_file + '.gz'), - ('tips.csv.bz2', tips_file + '.bz2'), - ] - - def add_tips_files(bucket_name): - for s3_key, file_name in test_s3_files: - with open(file_name, 'rb') as f: - conn.Bucket(bucket_name).put_object( - Key=s3_key, - Body=f) - - boto3 = pytest.importorskip('boto3') - # see gh-16135 - bucket = 'pandas-test' - - conn = boto3.resource("s3", region_name="us-east-1") - conn.create_bucket(Bucket=bucket) - add_tips_files(bucket) - - conn.create_bucket(Bucket='cant_get_it', ACL='private') - add_tips_files('cant_get_it') - - yield conn - - moto.mock_s3().stop() - - @pytest.mark.network @pytest.mark.parametrize( "compression,extension", diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index c9088d2ecc5e7..f66f9ccf065f7 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -161,9 +161,9 @@ def test_skip_bad_lines(self): error_bad_lines=False, warn_bad_lines=False) result = reader.read() - expected = {0: ['a', 'd', 'g', 'l'], - 1: ['b', 'e', 'h', 'm'], - 2: ['c', 'f', 'i', 'n']} + expected = {0: np.array(['a', 'd', 'g', 'l'], dtype=object), + 1: np.array(['b', 'e', 'h', 'm'], dtype=object), + 2: np.array(['c', 'f', 'i', 'n'], dtype=object)} assert_array_dicts_equal(result, expected) reader = TextReader(StringIO(data), delimiter=':', @@ -189,8 +189,10 @@ def 
test_header_not_enough_lines(self): assert header == expected recs = reader.read() - expected = {0: [1, 4], 1: [2, 5], 2: [3, 6]} - assert_array_dicts_equal(expected, recs) + expected = {0: np.array([1, 4], dtype=np.int64), + 1: np.array([2, 5], dtype=np.int64), + 2: np.array([3, 6], dtype=np.int64)} + assert_array_dicts_equal(recs, expected) # not enough rows pytest.raises(parser.ParserError, TextReader, StringIO(data), @@ -203,14 +205,16 @@ def test_header_not_enough_lines_as_recarray(self): '1,2,3\n' '4,5,6') - reader = TextReader(StringIO(data), delimiter=',', header=2, - as_recarray=True) + reader = TextReader(StringIO(data), delimiter=',', + header=2, as_recarray=True) header = reader.header expected = [['a', 'b', 'c']] assert header == expected recs = reader.read() - expected = {'a': [1, 4], 'b': [2, 5], 'c': [3, 6]} + expected = {'a': np.array([1, 4], dtype=np.int64), + 'b': np.array([2, 5], dtype=np.int64), + 'c': np.array([3, 6], dtype=np.int64)} assert_array_dicts_equal(expected, recs) # not enough rows @@ -225,7 +229,7 @@ def test_escapechar(self): reader = TextReader(StringIO(data), delimiter=',', header=None, escapechar='\\') result = reader.read() - expected = {0: ['"hello world"'] * 3} + expected = {0: np.array(['"hello world"'] * 3, dtype=object)} assert_array_dicts_equal(result, expected) def test_eof_has_eol(self): @@ -360,7 +364,7 @@ def test_empty_field_eof(self): result = TextReader(StringIO(data), delimiter=',').read() - expected = {0: np.array([1, 4]), + expected = {0: np.array([1, 4], dtype=np.int64), 1: np.array(['2', ''], dtype=object), 2: np.array(['3', ''], dtype=object)} assert_array_dicts_equal(result, expected) @@ -397,4 +401,5 @@ def test_empty_csv_input(self): def assert_array_dicts_equal(left, right): for k, v in compat.iteritems(left): - assert(np.array_equal(v, right[k])) + assert tm.assert_numpy_array_equal(np.asarray(v), + np.asarray(right[k])) diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py 
index 940a331a9de84..b5d1435c29cb7 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -18,7 +18,7 @@ try: DataFrame({'A': [1, 2]}).to_clipboard() _DEPS_INSTALLED = 1 -except PyperclipException: +except (PyperclipException, RuntimeError): _DEPS_INSTALLED = 0 diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index a28adcf1ee771..bc58ea1c7c228 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -180,6 +180,15 @@ def test_scalar_float(self): x_rec = self.encode_decode(x) tm.assert_almost_equal(x, x_rec) + def test_scalar_bool(self): + x = np.bool_(1) + x_rec = self.encode_decode(x) + tm.assert_almost_equal(x, x_rec) + + x = np.bool_(0) + x_rec = self.encode_decode(x) + tm.assert_almost_equal(x, x_rec) + def test_scalar_complex(self): x = np.random.rand() + 1j * np.random.rand() x_rec = self.encode_decode(x) @@ -263,7 +272,7 @@ def test_numpy_array_complex(self): x.dtype == x_rec.dtype) def test_list_mixed(self): - x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')] + x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo'), np.bool_(1)] x_rec = self.encode_decode(x) # current msgpack cannot distinguish list/tuple tm.assert_almost_equal(tuple(x), x_rec) @@ -401,6 +410,7 @@ def setup_method(self, method): 'G': [Timestamp('20130102', tz='US/Eastern')] * 5, 'H': Categorical([1, 2, 3, 4, 5]), 'I': Categorical([1, 2, 3, 4, 5], ordered=True), + 'J': (np.bool_(1), 2, 3, 4, 5), } self.d['float'] = Series(data['A']) @@ -410,6 +420,7 @@ def setup_method(self, method): self.d['dt_tz'] = Series(data['G']) self.d['cat_ordered'] = Series(data['H']) self.d['cat_unordered'] = Series(data['I']) + self.d['numpy_bool_mixed'] = Series(data['J']) def test_basic(self): diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index ecd4e8f719014..8c88cf076319b 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -105,7 +105,7 @@ def 
test_options_py(df_compat, pa): with pd.option_context('io.parquet.engine', 'pyarrow'): df.to_parquet(path) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -118,7 +118,7 @@ def test_options_fp(df_compat, fp): with pd.option_context('io.parquet.engine', 'fastparquet'): df.to_parquet(path, compression=None) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -130,7 +130,7 @@ def test_options_auto(df_compat, fp, pa): with pd.option_context('io.parquet.engine', 'auto'): df.to_parquet(path) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -162,7 +162,7 @@ def test_cross_engine_pa_fp(df_cross_compat, pa, fp): with tm.ensure_clean() as path: df.to_parquet(path, engine=pa, compression=None) - result = read_parquet(path, engine=fp, compression=None) + result = read_parquet(path, engine=fp) tm.assert_frame_equal(result, df) @@ -174,37 +174,40 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp): with tm.ensure_clean() as path: df.to_parquet(path, engine=fp, compression=None) - result = read_parquet(path, engine=pa, compression=None) + result = read_parquet(path, engine=pa) tm.assert_frame_equal(result, df) class Base(object): def check_error_on_write(self, df, engine, exc): - # check that we are raising the exception - # on writing - + # check that we are raising the exception on writing with pytest.raises(exc): with tm.ensure_clean() as path: to_parquet(df, path, engine, compression=None) - def check_round_trip(self, df, engine, expected=None, **kwargs): - + def check_round_trip(self, df, engine, expected=None, + write_kwargs=None, read_kwargs=None, + check_names=True): + if write_kwargs is None: + write_kwargs = {} + if read_kwargs is None: + read_kwargs = {} with tm.ensure_clean() as path: - df.to_parquet(path, engine, **kwargs) - result = read_parquet(path, engine) + 
df.to_parquet(path, engine, **write_kwargs) + result = read_parquet(path, engine, **read_kwargs) if expected is None: expected = df - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected, check_names=check_names) # repeat - to_parquet(df, path, engine, **kwargs) - result = pd.read_parquet(path, engine) + to_parquet(df, path, engine, **write_kwargs) + result = pd.read_parquet(path, engine, **read_kwargs) if expected is None: expected = df - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected, check_names=check_names) class TestBasic(Base): @@ -222,7 +225,7 @@ def test_columns_dtypes(self, engine): # unicode df.columns = [u'foo', u'bar'] - self.check_round_trip(df, engine, compression=None) + self.check_round_trip(df, engine, write_kwargs={'compression': None}) def test_columns_dtypes_invalid(self, engine): @@ -243,44 +246,94 @@ def test_columns_dtypes_invalid(self, engine): datetime.datetime(2011, 1, 1, 1, 1)] self.check_error_on_write(df, engine, ValueError) - def test_write_with_index(self, engine): + @pytest.mark.parametrize('compression', [None, 'gzip', 'snappy', 'brotli']) + def test_compression(self, engine, compression): + + if compression == 'snappy': + pytest.importorskip('snappy') + + elif compression == 'brotli': + pytest.importorskip('brotli') df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine, compression=None) + self.check_round_trip(df, engine, + write_kwargs={'compression': compression}) - # non-default index - for index in [[2, 3, 4], - pd.date_range('20130101', periods=3), - list('abc'), - [1, 3, 4], - pd.MultiIndex.from_tuples([('a', 1), ('a', 2), - ('b', 1)]), - ]: + def test_read_columns(self, engine): + # GH18154 + df = pd.DataFrame({'string': list('abc'), + 'int': list(range(1, 4))}) + + expected = pd.DataFrame({'string': list('abc')}) + self.check_round_trip(df, engine, expected=expected, + write_kwargs={'compression': None}, + read_kwargs={'columns': ['string']}) + 
+ def test_write_index(self, engine): + check_names = engine != 'fastparquet' + + if engine == 'pyarrow': + import pyarrow + if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'): + pytest.skip("pyarrow is < 0.7.0") + df = pd.DataFrame({'A': [1, 2, 3]}) + self.check_round_trip(df, engine, write_kwargs={'compression': None}) + + indexes = [ + [2, 3, 4], + pd.date_range('20130101', periods=3), + list('abc'), + [1, 3, 4], + ] + # non-default index + for index in indexes: df.index = index - self.check_error_on_write(df, engine, ValueError) + self.check_round_trip( + df, engine, + write_kwargs={'compression': None}, + check_names=check_names) # index with meta-data df.index = [0, 1, 2] df.index.name = 'foo' - self.check_error_on_write(df, engine, ValueError) + self.check_round_trip(df, engine, write_kwargs={'compression': None}) - # column multi-index - df.index = [0, 1, 2] - df.columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]), - self.check_error_on_write(df, engine, ValueError) - - @pytest.mark.parametrize('compression', [None, 'gzip', 'snappy', 'brotli']) - def test_compression(self, engine, compression): + def test_write_multiindex(self, pa_ge_070): + # Not suppoprted in fastparquet as of 0.1.3 or older pyarrow version + engine = pa_ge_070 - if compression == 'snappy': - pytest.importorskip('snappy') + df = pd.DataFrame({'A': [1, 2, 3]}) + index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]) + df.index = index + self.check_round_trip(df, engine, write_kwargs={'compression': None}) - elif compression == 'brotli': - pytest.importorskip('brotli') + def test_write_column_multiindex(self, engine): + # column multi-index + mi_columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]) + df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns) + self.check_error_on_write(df, engine, ValueError) - df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine, compression=compression) + def 
test_multiindex_with_columns(self, pa_ge_070): + + engine = pa_ge_070 + dates = pd.date_range('01-Jan-2018', '01-Dec-2018', freq='MS') + df = pd.DataFrame(np.random.randn(2 * len(dates), 3), + columns=list('ABC')) + index1 = pd.MultiIndex.from_product( + [['Level1', 'Level2'], dates], + names=['level', 'date']) + index2 = index1.copy(names=None) + for index in [index1, index2]: + df.index = index + with tm.ensure_clean() as path: + df.to_parquet(path, engine) + result = read_parquet(path, engine) + expected = df + tm.assert_frame_equal(result, expected) + result = read_parquet(path, engine, columns=['A', 'B']) + expected = df[['A', 'B']] + tm.assert_frame_equal(result, expected) class TestParquetPyArrow(Base): @@ -307,14 +360,12 @@ def test_basic(self, pa): self.check_round_trip(df, pa) def test_duplicate_columns(self, pa): - # not currently able to handle duplicate columns df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list('aaa')).copy() self.check_error_on_write(df, pa, ValueError) def test_unsupported(self, pa): - # period df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)}) self.check_error_on_write(df, pa, ValueError) @@ -368,7 +419,7 @@ def test_basic(self, fp): 'timedelta': pd.timedelta_range('1 day', periods=3), }) - self.check_round_trip(df, fp, compression=None) + self.check_round_trip(df, fp, write_kwargs={'compression': None}) @pytest.mark.skip(reason="not supported") def test_duplicate_columns(self, fp): @@ -381,7 +432,8 @@ def test_duplicate_columns(self, fp): def test_bool_with_none(self, fp): df = pd.DataFrame({'a': [True, None, False]}) expected = pd.DataFrame({'a': [1.0, np.nan, 0.0]}, dtype='float16') - self.check_round_trip(df, fp, expected=expected, compression=None) + self.check_round_trip(df, fp, expected=expected, + write_kwargs={'compression': None}) def test_unsupported(self, fp): @@ -397,7 +449,7 @@ def test_categorical(self, fp): if LooseVersion(fastparquet.__version__) < LooseVersion("0.1.3"): 
pytest.skip("CategoricalDtype not supported for older fp") df = pd.DataFrame({'a': pd.Categorical(list('abc'))}) - self.check_round_trip(df, fp, compression=None) + self.check_round_trip(df, fp, write_kwargs={'compression': None}) def test_datetime_tz(self, fp): # doesn't preserve tz @@ -407,4 +459,13 @@ def test_datetime_tz(self, fp): # warns on the coercion with catch_warnings(record=True): self.check_round_trip(df, fp, df.astype('datetime64[ns]'), - compression=None) + write_kwargs={'compression': None}) + + def test_filter_row_groups(self, fp): + d = {'a': list(range(0, 3))} + df = pd.DataFrame(d) + with tm.ensure_clean() as path: + df.to_parquet(path, fp, compression=None, + row_group_offsets=1) + result = read_parquet(path, fp, filters=[('a', '==', 0)]) + assert len(result) == 1 diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index a97747b93369f..a7cc6b711802e 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -4928,6 +4928,25 @@ def test_categorical_conversion(self): result = read_hdf(path, 'df', where='obsids=B') tm.assert_frame_equal(result, expected) + def test_categorical_nan_only_columns(self): + # GH18413 + # Check that read_hdf with categorical columns with NaN-only values can + # be read back. 
+ df = pd.DataFrame({ + 'a': ['a', 'b', 'c', np.nan], + 'b': [np.nan, np.nan, np.nan, np.nan], + 'c': [1, 2, 3, 4], + 'd': pd.Series([None] * 4, dtype=object) + }) + df['a'] = df.a.astype('category') + df['b'] = df.b.astype('category') + df['d'] = df.b.astype('category') + expected = df + with ensure_clean_path(self.path) as path: + df.to_hdf(path, 'df', format='table', data_columns=True) + result = read_hdf(path, 'df') + tm.assert_frame_equal(result, expected) + def test_duplicate_column_name(self): df = DataFrame(columns=["a", "a"], data=[[0, 0]]) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 2df43158b5370..4528565eefa0c 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -88,6 +88,7 @@ "TextCol" TEXT, "DateCol" TEXT, "IntDateCol" INTEGER, + "IntDateOnlyCol" INTEGER, "FloatCol" REAL, "IntCol" INTEGER, "BoolCol" INTEGER, @@ -98,6 +99,7 @@ `TextCol` TEXT, `DateCol` DATETIME, `IntDateCol` INTEGER, + `IntDateOnlyCol` INTEGER, `FloatCol` DOUBLE, `IntCol` INTEGER, `BoolCol` BOOLEAN, @@ -109,6 +111,7 @@ "DateCol" TIMESTAMP, "DateColWithTz" TIMESTAMP WITH TIME ZONE, "IntDateCol" INTEGER, + "IntDateOnlyCol" INTEGER, "FloatCol" DOUBLE PRECISION, "IntCol" INTEGER, "BoolCol" BOOLEAN, @@ -120,31 +123,33 @@ 'sqlite': { 'query': """ INSERT INTO types_test_data - VALUES(?, ?, ?, ?, ?, ?, ?, ?) + VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?) 
""", 'fields': ( - 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol', - 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + 'TextCol', 'DateCol', 'IntDateCol', 'IntDateOnlyCol', + 'FloatCol', 'IntCol', 'BoolCol', 'IntColWithNull', + 'BoolColWithNull' ) }, 'mysql': { 'query': """ INSERT INTO types_test_data - VALUES("%s", %s, %s, %s, %s, %s, %s, %s) + VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s) """, 'fields': ( - 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol', - 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + 'TextCol', 'DateCol', 'IntDateCol', 'IntDateOnlyCol', + 'FloatCol', 'IntCol', 'BoolCol', 'IntColWithNull', + 'BoolColWithNull' ) }, 'postgresql': { 'query': """ INSERT INTO types_test_data - VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) + VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """, 'fields': ( 'TextCol', 'DateCol', 'DateColWithTz', - 'IntDateCol', 'FloatCol', + 'IntDateCol', 'IntDateOnlyCol', 'FloatCol', 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' ) }, @@ -313,13 +318,13 @@ def _load_raw_sql(self): self.drop_table('types_test_data') self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor]) ins = SQL_STRINGS['insert_test_types'][self.flavor] - data = [ { 'TextCol': 'first', 'DateCol': '2000-01-03 00:00:00', 'DateColWithTz': '2000-01-01 00:00:00-08:00', 'IntDateCol': 535852800, + 'IntDateOnlyCol': 20101010, 'FloatCol': 10.10, 'IntCol': 1, 'BoolCol': False, @@ -331,6 +336,7 @@ def _load_raw_sql(self): 'DateCol': '2000-01-04 00:00:00', 'DateColWithTz': '2000-06-01 00:00:00-07:00', 'IntDateCol': 1356998400, + 'IntDateOnlyCol': 20101212, 'FloatCol': 10.10, 'IntCol': 1, 'BoolCol': False, @@ -610,20 +616,42 @@ def test_date_parsing(self): df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates=['DateCol']) assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + pd.Timestamp(2000, 1, 3, 0, 0, 0), + pd.Timestamp(2000, 1, 4, 0, 0, 0) + ] df = 
sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'}) assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + pd.Timestamp(2000, 1, 3, 0, 0, 0), + pd.Timestamp(2000, 1, 4, 0, 0, 0) + ] df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates=['IntDateCol']) - assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + pd.Timestamp(1986, 12, 25, 0, 0, 0), + pd.Timestamp(2013, 1, 1, 0, 0, 0) + ] df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates={'IntDateCol': 's'}) - assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + pd.Timestamp(1986, 12, 25, 0, 0, 0), + pd.Timestamp(2013, 1, 1, 0, 0, 0) + ] + + df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, + parse_dates={'IntDateOnlyCol': '%Y%m%d'}) + assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64) + assert df.IntDateOnlyCol.tolist() == [ + pd.Timestamp('2010-10-10'), + pd.Timestamp('2010-12-12') + ] def test_date_and_index(self): # Test case where same column appears in parse_date and index_col diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 055a490bc6b5d..78b47960e1a04 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -96,6 +96,8 @@ def setup_method(self, method): self.dta24_111 = os.path.join(self.dirpath, 'stata7_111.dta') + self.stata_dates = os.path.join(self.dirpath, 'stata13_dates.dta') + def read_dta(self, file): # Legacy default reader configuration return read_stata(file, convert_dates=True) @@ -1327,3 +1329,22 @@ def test_set_index(self): df.to_stata(path) reread = pd.read_stata(path, index_col='index') tm.assert_frame_equal(df, reread) + + @pytest.mark.parametrize( + 'column', ['ms', 'day', 'week', 'month', 'qtr', 'half', 'yr']) + def test_date_parsing_ignores_format_details(self, column): + # 
GH 17797 + # + # Test that display formats are ignored when determining if a numeric + # column is a date value. + # + # All date types are stored as numbers and format associated with the + # column denotes both the type of the date and the display format. + # + # STATA supports 9 date types which each have distinct units. We test 7 + # of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that + # accounts for leap seconds and %tb relies on STATAs business calendar. + df = read_stata(self.stata_dates) + unformatted = df.loc[0, column] + formatted = df.loc[0, column + "_fmt"] + assert unformatted == formatted diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index e1f64bed5598d..3818c04649366 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -1,20 +1,144 @@ +import subprocess import pytest from datetime import datetime, date import numpy as np -from pandas import Timestamp, Period, Index +from pandas import Timestamp, Period, Index, date_range, Series from pandas.compat import u +import pandas.core.config as cf import pandas.util.testing as tm from pandas.tseries.offsets import Second, Milli, Micro, Day from pandas.compat.numpy import np_datetime64_compat converter = pytest.importorskip('pandas.plotting._converter') +from pandas.plotting import (register_matplotlib_converters, + deregister_matplotlib_converters) def test_timtetonum_accepts_unicode(): assert (converter.time2num("00:01") == converter.time2num(u("00:01"))) +class TestRegistration(object): + + def test_register_by_default(self): + # Run in subprocess to ensure a clean state + code = ("'import matplotlib.units; " + "import pandas as pd; " + "units = dict(matplotlib.units.registry); " + "assert pd.Timestamp in units)'") + call = ['python', '-c', code] + assert subprocess.check_call(call) == 0 + + def test_warns(self): + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), 
index=date_range('2017', periods=12)) + _, ax = plt.subplots() + + # Set to the "warning" state, in case this isn't the first test run + converter._WARN = True + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False) as w: + ax.plot(s.index, s.values) + plt.close() + + assert len(w) == 1 + assert "Using an implicitly registered datetime converter" in str(w[0]) + + def test_registering_no_warning(self): + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range('2017', periods=12)) + _, ax = plt.subplots() + + # Set to the "warn" state, in case this isn't the first test run + converter._WARN = True + register_matplotlib_converters() + with tm.assert_produces_warning(None) as w: + ax.plot(s.index, s.values) + + assert len(w) == 0 + + def test_pandas_plots_register(self): + pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range('2017', periods=12)) + # Set to the "warn" state, in case this isn't the first test run + converter._WARN = True + with tm.assert_produces_warning(None) as w: + s.plot() + + assert len(w) == 0 + + def test_matplotlib_formatters(self): + units = pytest.importorskip("matplotlib.units") + assert Timestamp in units.registry + + ctx = cf.option_context("plotting.matplotlib.register_converters", + False) + with ctx: + assert Timestamp not in units.registry + + assert Timestamp in units.registry + + def test_option_no_warning(self): + pytest.importorskip("matplotlib.pyplot") + ctx = cf.option_context("plotting.matplotlib.register_converters", + False) + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range('2017', periods=12)) + _, ax = plt.subplots() + + converter._WARN = True + # Test without registering first, no warning + with ctx: + with tm.assert_produces_warning(None) as w: + ax.plot(s.index, s.values) + + assert len(w) == 0 + + # Now test with registering + converter._WARN = True + register_matplotlib_converters() + with ctx: + with 
tm.assert_produces_warning(None) as w: + ax.plot(s.index, s.values) + + assert len(w) == 0 + + def test_registry_resets(self): + units = pytest.importorskip("matplotlib.units") + dates = pytest.importorskip("matplotlib.dates") + + # make a copy, to reset to + original = dict(units.registry) + + try: + # get to a known state + units.registry.clear() + date_converter = dates.DateConverter() + units.registry[datetime] = date_converter + units.registry[date] = date_converter + + register_matplotlib_converters() + assert units.registry[date] is not date_converter + deregister_matplotlib_converters() + assert units.registry[date] is date_converter + + finally: + # restore original stater + units.registry.clear() + for k, v in original.items(): + units.registry[k] = v + + def test_old_import_warns(self): + with tm.assert_produces_warning(FutureWarning) as w: + from pandas.tseries import converter + converter.register() + + assert len(w) + assert ('pandas.plotting.register_matplotlib_converters' in + str(w[0].message)) + + class TestDateTimeConverter(object): def setup_method(self, method): diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index d66012e2a56a0..d6cedac747f25 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1,13 +1,14 @@ """ Test cases for time series specific (freq conversion, etc) """ from datetime import datetime, timedelta, date, time +import pickle import pytest from pandas.compat import lrange, zip import numpy as np from pandas import Index, Series, DataFrame, NaT -from pandas.compat import is_platform_mac +from pandas.compat import is_platform_mac, PY3 from pandas.core.indexes.datetimes import date_range, bdate_range from pandas.core.indexes.timedeltas import timedelta_range from pandas.tseries.offsets import DateOffset @@ -1470,5 +1471,12 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs): with 
ensure_clean(return_filelike=True) as path: plt.savefig(path) + + # GH18439 + # this is supported only in Python 3 pickle since + # pickle in Python2 doesn't support instancemethod pickling + if PY3: + with ensure_clean(return_filelike=True) as path: + pickle.dump(fig, path) finally: plt.close(fig) diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 6f476553091d9..54a512d14fef4 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -201,6 +201,7 @@ def test_parallel_coordinates(self): with tm.assert_produces_warning(FutureWarning): parallel_coordinates(df, 'Name', colors=colors) + @pytest.mark.xfail(reason="unreliable test") def test_parallel_coordinates_with_sorted_labels(self): """ For #15908 """ from pandas.plotting import parallel_coordinates diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 84a15cab34cd0..11368e44943d8 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1594,7 +1594,9 @@ def test_concat_series_axis1_same_names_ignore_index(self): s2 = Series(randn(len(dates)), index=dates, name='value') result = concat([s1, s2], axis=1, ignore_index=True) - assert np.array_equal(result.columns, [0, 1]) + expected = Index([0, 1]) + + tm.assert_index_equal(result.columns, expected) def test_concat_iterables(self): from collections import deque, Iterable @@ -1981,3 +1983,21 @@ def test_concat_will_upcast(dt, pdt): pdt(np.array([5], dtype=dt, ndmin=dims))] x = pd.concat(dfs) assert x.values.dtype == 'float64' + + +def test_concat_empty_and_non_empty_frame_regression(): + # GH 18178 regression test + df1 = pd.DataFrame({'foo': [1]}) + df2 = pd.DataFrame({'foo': []}) + expected = pd.DataFrame({'foo': [1.0]}) + result = pd.concat([df1, df2]) + assert_frame_equal(result, expected) + + +def test_concat_empty_and_non_empty_series_regression(): + # GH 18187 regression test + s1 = pd.Series([1]) + s2 = 
pd.Series([]) + expected = s1 + result = pd.concat([s1, s2]) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/reshape/test_merge.py b/pandas/tests/reshape/test_merge.py index 172667c9a0fb8..33d91af21c723 100644 --- a/pandas/tests/reshape/test_merge.py +++ b/pandas/tests/reshape/test_merge.py @@ -861,6 +861,12 @@ def test_validation(self): result = merge(left, right, on=['a', 'b'], validate='1:1') assert_frame_equal(result, expected_multi) + def test_merge_two_empty_df_no_division_error(self): + # GH17776, PR #17846 + a = pd.DataFrame({'a': [], 'b': [], 'c': []}) + with np.errstate(divide='raise'): + merge(a, a, on=('a', 'b')) + def _check_merge(x, y): for how in ['inner', 'left', 'outer']: diff --git a/pandas/tests/reshape/test_merge_asof.py b/pandas/tests/reshape/test_merge_asof.py index 78bfa2ff8597c..4b2680b9be592 100644 --- a/pandas/tests/reshape/test_merge_asof.py +++ b/pandas/tests/reshape/test_merge_asof.py @@ -973,3 +973,15 @@ def test_on_float_by_int(self): columns=['symbol', 'exch', 'price', 'mpv']) assert_frame_equal(result, expected) + + def test_merge_datatype_error(self): + """ Tests merge datatype mismatch error """ + msg = 'merge keys \[0\] object and int64, must be the same type' + + left = pd.DataFrame({'left_val': [1, 5, 10], + 'a': ['a', 'b', 'c']}) + right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7], + 'a': [1, 2, 3, 6, 7]}) + + with tm.assert_raises_regex(MergeError, msg): + merge_asof(left, right, on='a') diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 135e4c544de41..0e69371511294 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -125,12 +125,13 @@ def test_round_nat(klass): def test_NaT_methods(): # GH 9513 + # GH 17329 for `timestamp` raise_methods = ['astimezone', 'combine', 'ctime', 'dst', 'fromordinal', 'fromtimestamp', 'isocalendar', 'strftime', 'strptime', 'time', 'timestamp', 'timetuple', 'timetz', 'toordinal', 'tzname', 'utcfromtimestamp', 
'utcnow', 'utcoffset', - 'utctimetuple'] + 'utctimetuple', 'timestamp'] nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today', 'tz_convert', 'tz_localize'] nan_methods = ['weekday', 'isoweekday'] diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index c1b9f858a08de..4053257fbd2c8 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -19,7 +19,7 @@ from pandas._libs import tslib, period from pandas._libs.tslibs.timezones import get_timezone -from pandas.compat import lrange, long +from pandas.compat import lrange, long, PY3 from pandas.util.testing import assert_series_equal from pandas.compat.numpy import np_datetime64_compat from pandas import (Timestamp, date_range, Period, Timedelta, compat, @@ -1079,6 +1079,28 @@ def test_is_leap_year(self): dt = Timestamp('2100-01-01 00:00:00', tz=tz) assert not dt.is_leap_year + def test_timestamp(self): + # GH#17329 + # tz-naive --> treat it as if it were UTC for purposes of timestamp() + ts = Timestamp.now() + uts = ts.replace(tzinfo=utc) + assert ts.timestamp() == uts.timestamp() + + tsc = Timestamp('2014-10-11 11:00:01.12345678', tz='US/Central') + utsc = tsc.tz_convert('UTC') + + # utsc is a different representation of the same time + assert tsc.timestamp() == utsc.timestamp() + + if PY3: + + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + + # should agree with datetime.timestamp method + dt = ts.to_pydatetime() + assert dt.timestamp() == ts.timestamp() + class TestTimestampNsOperations(object): diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 8cc40bb5146c5..d6db2ab83098b 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -28,40 +28,124 @@ class TestSeriesAnalytics(TestData): @pytest.mark.parametrize("use_bottleneck", [True, False]) - @pytest.mark.parametrize("method", ["sum", "prod"]) - def 
test_empty(self, method, use_bottleneck): - + @pytest.mark.parametrize("method, unit", [ + ("sum", 0.0), + ("prod", 1.0) + ]) + def test_empty(self, method, unit, use_bottleneck): with pd.option_context("use_bottleneck", use_bottleneck): - # GH 9422 - # treat all missing as NaN + # GH 9422 / 18921 + # Entirely empty s = Series([]) + # NA by default result = getattr(s, method)() + assert result == unit + + # Explict + result = getattr(s, method)(min_count=0) + assert result == unit + + result = getattr(s, method)(min_count=1) assert isna(result) + # Skipna, default result = getattr(s, method)(skipna=True) + result == unit + + # Skipna, explicit + result = getattr(s, method)(skipna=True, min_count=0) + assert result == unit + + result = getattr(s, method)(skipna=True, min_count=1) assert isna(result) + # All-NA s = Series([np.nan]) + # NA by default result = getattr(s, method)() + assert result == unit + + # Explicit + result = getattr(s, method)(min_count=0) + assert result == unit + + result = getattr(s, method)(min_count=1) assert isna(result) + # Skipna, default result = getattr(s, method)(skipna=True) + result == unit + + # skipna, explicit + result = getattr(s, method)(skipna=True, min_count=0) + assert result == unit + + result = getattr(s, method)(skipna=True, min_count=1) assert isna(result) + # Mix of valid, empty s = Series([np.nan, 1]) + # Default result = getattr(s, method)() assert result == 1.0 - s = Series([np.nan, 1]) + # Explicit + result = getattr(s, method)(min_count=0) + assert result == 1.0 + + result = getattr(s, method)(min_count=1) + assert result == 1.0 + + # Skipna result = getattr(s, method)(skipna=True) assert result == 1.0 + result = getattr(s, method)(skipna=True, min_count=0) + assert result == 1.0 + + result = getattr(s, method)(skipna=True, min_count=1) + assert result == 1.0 + # GH #844 (changed in 9422) df = DataFrame(np.empty((10, 0))) - assert (df.sum(1).isnull()).all() + assert (getattr(df, method)(1) == unit).all() + + s = 
pd.Series([1]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + s = pd.Series([np.nan]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + s = pd.Series([np.nan, 1]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + @pytest.mark.parametrize('method, unit', [ + ('sum', 0.0), + ('prod', 1.0), + ]) + def test_empty_multi(self, method, unit): + s = pd.Series([1, np.nan, np.nan, np.nan], + index=pd.MultiIndex.from_product([('a', 'b'), (0, 1)])) + # 1 / 0 by default + result = getattr(s, method)(level=0) + expected = pd.Series([1, unit], index=['a', 'b']) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = getattr(s, method)(level=0, min_count=0) + expected = pd.Series([1, unit], index=['a', 'b']) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = getattr(s, method)(level=0, min_count=1) + expected = pd.Series([1, np.nan], index=['a', 'b']) + tm.assert_series_equal(result, expected) @pytest.mark.parametrize( - "method", ['sum', 'mean', 'median', 'std', 'var']) + "method", ['mean', 'median', 'std', 'var']) def test_ops_consistency_on_empty(self, method): # GH 7869 @@ -109,7 +193,7 @@ def test_sum_overflow(self, use_bottleneck): assert np.allclose(float(result), v[-1]) def test_sum(self): - self._check_stat_op('sum', np.sum, check_allna=True) + self._check_stat_op('sum', np.sum, check_allna=False) def test_sum_inf(self): s = Series(np.random.randn(10)) @@ -848,6 +932,12 @@ def test_value_counts_nunique(self): result = series.nunique() assert result == 11 + # GH 18051 + s = pd.Series(pd.Categorical([])) + assert s.nunique() == 0 + s = pd.Series(pd.Categorical([np.nan])) + assert s.nunique() == 0 + def test_unique(self): # 714 also, dtype=float @@ -920,6 +1010,14 @@ def test_drop_duplicates(self): sc.drop_duplicates(keep=False, inplace=True) assert_series_equal(sc, s[~expected]) + # GH 18051 + s = pd.Series(pd.Categorical([])) + tm.assert_categorical_equal(s.unique(), 
pd.Categorical([]), + check_dtype=False) + s = pd.Series(pd.Categorical([np.nan])) + tm.assert_categorical_equal(s.unique(), pd.Categorical([np.nan]), + check_dtype=False) + def test_clip(self): val = self.ts.median() diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index bd4e8b23f31b4..5ca4eba4da13b 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -636,17 +636,21 @@ def test_valid(self): def test_isna(self): ser = Series([0, 5.4, 3, nan, -0.001]) - np.array_equal(ser.isna(), - Series([False, False, False, True, False]).values) + expected = Series([False, False, False, True, False]) + tm.assert_series_equal(ser.isna(), expected) + ser = Series(["hi", "", nan]) - np.array_equal(ser.isna(), Series([False, False, True]).values) + expected = Series([False, False, True]) + tm.assert_series_equal(ser.isna(), expected) def test_notna(self): ser = Series([0, 5.4, 3, nan, -0.001]) - np.array_equal(ser.notna(), - Series([True, True, True, False, True]).values) + expected = Series([True, True, True, False, True]) + tm.assert_series_equal(ser.notna(), expected) + ser = Series(["hi", "", nan]) - np.array_equal(ser.notna(), Series([True, True, False]).values) + expected = Series([True, True, False]) + tm.assert_series_equal(ser.notna(), expected) def test_pad_nan(self): x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'], diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py index cf5e3fe4f29b0..255367523a3d8 100644 --- a/pandas/tests/series/test_quantile.py +++ b/pandas/tests/series/test_quantile.py @@ -38,7 +38,7 @@ def test_quantile(self): # GH7661 result = Series([np.timedelta64('NaT')]).sum() - assert result is pd.NaT + assert result == pd.Timedelta(0) msg = 'percentiles should all be in the interval \\[0, 1\\]' for invalid in [-1, 2, [0.5, -1], [0.5, 2]]: diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 
38625bfb29917..240a7ad4b22f9 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1132,19 +1132,19 @@ def test_pad_backfill_object_segfault(): result = libalgos.pad_object(old, new) expected = np.array([-1], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) result = libalgos.pad_object(new, old) expected = np.array([], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) result = libalgos.backfill_object(old, new) expected = np.array([-1], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) result = libalgos.backfill_object(new, old) expected = np.array([], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) def test_arrmap(): @@ -1219,7 +1219,7 @@ def test_is_lexsorted(): 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0]), + 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='int64'), np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, @@ -1231,19 +1231,10 @@ def test_is_lexsorted(): 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, - 4, 3, 2, 1, 0])] + 4, 3, 2, 1, 0], dtype='int64')] assert (not libalgos.is_lexsorted(failure)) -# def test_get_group_index(): -# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype=np.int64) -# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype=np.int64) -# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype=np.int64) - -# result = lib.get_group_index([a, b], (3, 4)) - -# assert(np.array_equal(result, expected)) - def test_groupsort_indexer(): a = np.random.randint(0, 1000, 100).astype(np.int64) @@ -1252,14 +1243,22 @@ def test_groupsort_indexer(): result = libalgos.groupsort_indexer(a, 1000)[0] 
# need to use a stable sort + # np.argsort returns int, groupsort_indexer + # always returns int64 expected = np.argsort(a, kind='mergesort') - assert (np.array_equal(result, expected)) + expected = expected.astype(np.int64) + + tm.assert_numpy_array_equal(result, expected) # compare with lexsort + # np.lexsort returns int, groupsort_indexer + # always returns int64 key = a * 1000 + b result = libalgos.groupsort_indexer(key, 1000000)[0] expected = np.lexsort((b, a)) - assert (np.array_equal(result, expected)) + expected = expected.astype(np.int64) + + tm.assert_numpy_array_equal(result, expected) def test_infinity_sort(): diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 272ba25bf8f8a..48c1622aa0c4e 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -2124,6 +2124,13 @@ def test_creation_astype(self): res = s.astype(CategoricalDtype(list('abcdef'), ordered=True)) tm.assert_series_equal(res, exp) + @pytest.mark.parametrize('columns', [['x'], ['x', 'y'], ['x', 'y', 'z']]) + def test_empty_astype(self, columns): + # GH 18004 + msg = '> 1 ndim Categorical are not supported at this time' + with tm.assert_raises_regex(NotImplementedError, msg): + DataFrame(columns=columns).astype('category') + def test_construction_series(self): l = [1, 2, 3, 1] @@ -3156,18 +3163,6 @@ def test_info(self): buf = compat.StringIO() df2.info(buf=buf) - def test_groupby_sort(self): - - # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby - # This should result in a properly sorted Series so that the plot - # has a sorted x axis - # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') - - res = self.cat.groupby(['value_group'])['value_group'].count() - exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] - exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name) - tm.assert_series_equal(res, exp) - def test_min_max(self): # unordered cats have 
no min/max cat = Series(Categorical(["a", "b", "c", "d"], ordered=False)) @@ -3287,123 +3282,6 @@ def test_value_counts_with_nan(self): res = s.value_counts(dropna=False, sort=False) tm.assert_series_equal(res, exp) - def test_groupby(self): - - cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], - categories=["a", "b", "c", "d"], ordered=True) - data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats}) - - exp_index = pd.CategoricalIndex(['a', 'b', 'c', 'd'], name='b', - ordered=True) - expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index) - result = data.groupby("b").mean() - tm.assert_frame_equal(result, expected) - - raw_cat1 = Categorical(["a", "a", "b", "b"], - categories=["a", "b", "z"], ordered=True) - raw_cat2 = Categorical(["c", "d", "c", "d"], - categories=["c", "d", "y"], ordered=True) - df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]}) - - # single grouper - gb = df.groupby("A") - exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True) - expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)}) - result = gb.sum() - tm.assert_frame_equal(result, expected) - - # multiple groupers - gb = df.groupby(['A', 'B']) - exp_index = pd.MultiIndex.from_product( - [Categorical(["a", "b", "z"], ordered=True), - Categorical(["c", "d", "y"], ordered=True)], - names=['A', 'B']) - expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan, - np.nan, np.nan, np.nan]}, - index=exp_index) - result = gb.sum() - tm.assert_frame_equal(result, expected) - - # multiple groupers with a non-cat - df = df.copy() - df['C'] = ['foo', 'bar'] * 2 - gb = df.groupby(['A', 'B', 'C']) - exp_index = pd.MultiIndex.from_product( - [Categorical(["a", "b", "z"], ordered=True), - Categorical(["c", "d", "y"], ordered=True), - ['foo', 'bar']], - names=['A', 'B', 'C']) - expected = DataFrame({'values': Series( - np.nan, index=exp_index)}).sort_index() - expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4] - result = gb.sum() 
- tm.assert_frame_equal(result, expected) - - # GH 8623 - x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'], - [1, 'John P. Doe']], - columns=['person_id', 'person_name']) - x['person_name'] = pd.Categorical(x.person_name) - - g = x.groupby(['person_id']) - result = g.transform(lambda x: x) - tm.assert_frame_equal(result, x[['person_name']]) - - result = x.drop_duplicates('person_name') - expected = x.iloc[[0, 1]] - tm.assert_frame_equal(result, expected) - - def f(x): - return x.drop_duplicates('person_name').iloc[0] - - result = g.apply(f) - expected = x.iloc[[0, 1]].copy() - expected.index = Index([1, 2], name='person_id') - expected['person_name'] = expected['person_name'].astype('object') - tm.assert_frame_equal(result, expected) - - # GH 9921 - # Monotonic - df = DataFrame({"a": [5, 15, 25]}) - c = pd.cut(df.a, bins=[0, 10, 20, 30, 40]) - - result = df.a.groupby(c).transform(sum) - tm.assert_series_equal(result, df['a']) - - tm.assert_series_equal( - df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) - tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) - tm.assert_frame_equal( - df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']]) - - # Filter - tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a']) - tm.assert_frame_equal(df.groupby(c).filter(np.all), df) - - # Non-monotonic - df = DataFrame({"a": [5, 15, 25, -5]}) - c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40]) - - result = df.a.groupby(c).transform(sum) - tm.assert_series_equal(result, df['a']) - - tm.assert_series_equal( - df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) - tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) - tm.assert_frame_equal( - df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']]) - - # GH 9603 - df = pd.DataFrame({'a': [1, 0, 0, 0]}) - c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=pd.Categorical(list('abcd'))) - result = df.groupby(c).apply(len) - - exp_index = pd.CategoricalIndex(c.values.categories, - 
ordered=c.values.ordered) - expected = pd.Series([1, 0, 0, 0], index=exp_index) - expected.index.name = 'a' - tm.assert_series_equal(result, expected) - def test_pivot_table(self): raw_cat1 = Categorical(["a", "a", "b", "b"], diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 61f0c992225c6..b8e9191002640 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -52,7 +52,6 @@ def test_xarray(df): assert df.to_xarray() is not None -@tm.network def test_statsmodels(): statsmodels = import_module('statsmodels') # noqa diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 6d2607962dfb0..aebc9cd3deaac 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -73,17 +73,11 @@ def teardown_method(self, method): def run_arithmetic(self, df, other, assert_func, check_dtype=False, test_flex=True): expr._MIN_ELEMENTS = 0 - operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow'] + operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv'] if not compat.PY3: operations.append('div') for arith in operations: - # numpy >= 1.11 doesn't handle integers - # raised to integer powers - # https://github.com/pandas-dev/pandas/issues/15363 - if arith == 'pow' and not _np_version_under1p11: - continue - operator_name = arith if arith == 'div': operator_name = 'truediv' diff --git a/pandas/tests/test_join.py b/pandas/tests/test_join.py index cde1cab37d09c..af946436b55c7 100644 --- a/pandas/tests/test_join.py +++ b/pandas/tests/test_join.py @@ -53,7 +53,7 @@ def test_left_join_indexer_unique(): result = _join.left_join_indexer_unique_int64(b, a) expected = np.array([1, 1, 2, 3, 3], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) def test_left_outer_join_bug(): @@ -69,13 +69,14 @@ def test_left_outer_join_bug(): lidx, ridx = _join.left_outer_join(left, right, max_groups, sort=False) - 
exp_lidx = np.arange(len(left)) - exp_ridx = -np.ones(len(left)) + exp_lidx = np.arange(len(left), dtype=np.int64) + exp_ridx = -np.ones(len(left), dtype=np.int64) + exp_ridx[left == 1] = 1 exp_ridx[left == 3] = 0 - assert (np.array_equal(lidx, exp_lidx)) - assert (np.array_equal(ridx, exp_ridx)) + tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assert_numpy_array_equal(ridx, exp_ridx) def test_inner_join_indexer(): diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py index 2662720bb436d..75aa9aa4e8198 100644 --- a/pandas/tests/test_lib.py +++ b/pandas/tests/test_lib.py @@ -198,7 +198,7 @@ def test_get_reverse_indexer(self): indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64) result = lib.get_reverse_indexer(indexer, 5) expected = np.array([4, 2, 3, 6, 7], dtype=np.int64) - assert np.array_equal(result, expected) + tm.assert_numpy_array_equal(result, expected) class TestNAObj(object): diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 9305504f8d5e3..5d56088193d30 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- from __future__ import division, print_function +from distutils.version import LooseVersion from functools import partial import pytest @@ -181,12 +182,17 @@ def _coerce_tds(targ, res): check_dtype=check_dtype) def check_fun_data(self, testfunc, targfunc, testarval, targarval, - targarnanval, check_dtype=True, **kwargs): + targarnanval, check_dtype=True, empty_targfunc=None, + **kwargs): for axis in list(range(targarval.ndim)) + [None]: for skipna in [False, True]: targartempval = targarval if skipna else targarnanval - try: + if skipna and empty_targfunc and isna(targartempval).all(): + targ = empty_targfunc(targartempval, axis=axis, **kwargs) + else: targ = targfunc(targartempval, axis=axis, **kwargs) + + try: res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) self.check_results(targ, res, axis, @@ -218,10 +224,11 @@ def 
check_fun_data(self, testfunc, targfunc, testarval, targarval, except ValueError: return self.check_fun_data(testfunc, targfunc, testarval2, targarval2, - targarnanval2, check_dtype=check_dtype, **kwargs) + targarnanval2, check_dtype=check_dtype, + empty_targfunc=empty_targfunc, **kwargs) def check_fun(self, testfunc, targfunc, testar, targar=None, - targarnan=None, **kwargs): + targarnan=None, empty_targfunc=None, **kwargs): if targar is None: targar = testar if targarnan is None: @@ -231,7 +238,8 @@ def check_fun(self, testfunc, targfunc, testar, targar=None, targarnanval = getattr(self, targarnan) try: self.check_fun_data(testfunc, targfunc, testarval, targarval, - targarnanval, **kwargs) + targarnanval, empty_targfunc=empty_targfunc, + **kwargs) except BaseException as exc: exc.args += ('testar: %s' % testar, 'targar: %s' % targar, 'targarnan: %s' % targarnan) @@ -328,7 +336,8 @@ def test_nanall(self): def test_nansum(self): self.check_funs(nanops.nansum, np.sum, allow_str=False, - allow_date=False, allow_tdelta=True, check_dtype=False) + allow_date=False, allow_tdelta=True, check_dtype=False, + empty_targfunc=np.nansum) def test_nanmean(self): self.check_funs(nanops.nanmean, np.mean, allow_complex=False, @@ -461,8 +470,12 @@ def test_nankurt(self): allow_tdelta=False) def test_nanprod(self): + if LooseVersion(np.__version__) < LooseVersion("1.10.0"): + raise pytest.skip("np.nanprod added in 1.10.0") + self.check_funs(nanops.nanprod, np.prod, allow_str=False, - allow_date=False, allow_tdelta=False) + allow_date=False, allow_tdelta=False, + empty_targfunc=np.nanprod) def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 33fb6f1108bf2..7e442fcc2fc8b 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -3,6 +3,7 @@ from warnings import catch_warnings from datetime import datetime +from 
distutils.version import LooseVersion import operator import pytest @@ -10,7 +11,6 @@ import pandas as pd from pandas.core.dtypes.common import is_float_dtype -from pandas.core.dtypes.missing import remove_na_arraylike from pandas import (Series, DataFrame, Index, date_range, isna, notna, pivot, MultiIndex) from pandas.core.nanops import nanall, nanany @@ -83,13 +83,16 @@ def test_count(self): self._check_stat_op('count', f, obj=self.panel, has_skipna=False) def test_sum(self): - self._check_stat_op('sum', np.sum) + self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum) def test_mean(self): self._check_stat_op('mean', np.mean) def test_prod(self): - self._check_stat_op('prod', np.prod) + if LooseVersion(np.__version__) < LooseVersion("1.10.0"): + raise pytest.skip("np.nanprod added in 1.10.0") + + self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod) def test_median(self): def wrapper(x): @@ -142,7 +145,8 @@ def alt(x): self._check_stat_op('sem', alt) - def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): + def _check_stat_op(self, name, alternative, obj=None, has_skipna=True, + skipna_alternative=None): if obj is None: obj = self.panel @@ -154,11 +158,8 @@ def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): if has_skipna: - def skipna_wrapper(x): - nona = remove_na_arraylike(x) - if len(nona) == 0: - return np.nan - return alternative(nona) + skipna_wrapper = tm._make_skipna_wrapper(alternative, + skipna_alternative) def wrapper(x): return alternative(np.asarray(x)) diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index c0e8770dff8b8..ef19f11499e00 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -4,11 +4,11 @@ import operator import pytest from warnings import catch_warnings +from distutils.version import LooseVersion import numpy as np from pandas import Series, Index, isna, notna from pandas.core.dtypes.common import is_float_dtype -from 
pandas.core.dtypes.missing import remove_na_arraylike from pandas.core.panel import Panel from pandas.core.panel4d import Panel4D from pandas.tseries.offsets import BDay @@ -37,13 +37,16 @@ def test_count(self): self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False) def test_sum(self): - self._check_stat_op('sum', np.sum) + self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum) def test_mean(self): self._check_stat_op('mean', np.mean) def test_prod(self): - self._check_stat_op('prod', np.prod) + if LooseVersion(np.__version__) < LooseVersion("1.10.0"): + raise pytest.skip("np.nanprod added in 1.10.0") + + self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod) def test_median(self): def wrapper(x): @@ -106,7 +109,8 @@ def alt(x): # self._check_stat_op('skew', alt) - def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): + def _check_stat_op(self, name, alternative, obj=None, has_skipna=True, + skipna_alternative=None): if obj is None: obj = self.panel4d @@ -117,11 +121,9 @@ def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): f = getattr(obj, name) if has_skipna: - def skipna_wrapper(x): - nona = remove_na_arraylike(x) - if len(nona) == 0: - return np.nan - return alternative(nona) + + skipna_wrapper = tm._make_skipna_wrapper(alternative, + skipna_alternative) def wrapper(x): return alternative(np.asarray(x)) diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index ac8297a53de37..04e702644913f 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -4,6 +4,7 @@ from datetime import datetime, timedelta from functools import partial from textwrap import dedent +from operator import methodcaller import pytz import pytest @@ -2729,6 +2730,34 @@ def test_resample_weekly_bug_1726(self): # it works! 
df.resample('W-MON', closed='left', label='left').first() + def test_resample_with_dst_time_change(self): + # GH 15549 + index = pd.DatetimeIndex([1457537600000000000, 1458059600000000000], + tz='UTC').tz_convert('America/Chicago') + df = pd.DataFrame([1, 2], index=index) + result = df.resample('12h', closed='right', + label='right').last().ffill() + + expected_index_values = ['2016-03-09 12:00:00-06:00', + '2016-03-10 00:00:00-06:00', + '2016-03-10 12:00:00-06:00', + '2016-03-11 00:00:00-06:00', + '2016-03-11 12:00:00-06:00', + '2016-03-12 00:00:00-06:00', + '2016-03-12 12:00:00-06:00', + '2016-03-13 00:00:00-06:00', + '2016-03-13 13:00:00-05:00', + '2016-03-14 01:00:00-05:00', + '2016-03-14 13:00:00-05:00', + '2016-03-15 01:00:00-05:00', + '2016-03-15 13:00:00-05:00'] + index = pd.DatetimeIndex(expected_index_values, + tz='UTC').tz_convert('America/Chicago') + expected = pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 2.0], index=index) + assert_frame_equal(result, expected) + def test_resample_bms_2752(self): # GH2753 foo = pd.Series(index=pd.bdate_range('20000101', '20000201')) @@ -3103,6 +3132,26 @@ def f(x): result = g.apply(f) assert_frame_equal(result, expected) + def test_apply_with_mutated_index(self): + # GH 15169 + index = pd.date_range('1-1-2015', '12-31-15', freq='D') + df = pd.DataFrame(data={'col1': np.random.rand(len(index))}, + index=index) + + def f(x): + s = pd.Series([1, 2], index=['a', 'b']) + return s + + expected = df.groupby(pd.Grouper(freq='M')).apply(f) + + result = df.resample('M').apply(f) + assert_frame_equal(result, expected) + + # A case for series + expected = df['col1'].groupby(pd.Grouper(freq='M')).apply(f) + result = df['col1'].resample('M').apply(f) + assert_series_equal(result, expected) + def test_resample_groupby_with_label(self): # GH 13235 index = date_range('2000-01-01', freq='2D', periods=5) @@ -3329,8 +3378,45 @@ def test_aggregate_normal(self): assert_frame_equal(expected, dt_result) """ - 
def test_aggregate_with_nat(self): + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_resample_entirly_nat_window(self, method, unit): + s = pd.Series([0] * 2 + [np.nan] * 2, + index=pd.date_range('2017', periods=4)) + # 0 / 1 by default + result = methodcaller(method)(s.resample("2d")) + expected = pd.Series([0.0, unit], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = methodcaller(method, min_count=0)(s.resample("2d")) + expected = pd.Series([0.0, unit], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = methodcaller(method, min_count=1)(s.resample("2d")) + expected = pd.Series([0.0, np.nan], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('func, fill_value', [ + ('min', np.nan), + ('max', np.nan), + ('sum', 0), + ('prod', 1), + ('count', 0), + ]) + def test_aggregate_with_nat(self, func, fill_value): # check TimeGrouper's aggregation is identical as normal groupby + # if NaT is included, 'var', 'std', 'mean', 'first','last' + # and 'nth' doesn't work yet n = 20 data = np.random.randn(n, 4).astype('int64') @@ -3344,39 +3430,78 @@ def test_aggregate_with_nat(self): normal_grouped = normal_df.groupby('key') dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) - for func in ['min', 'max', 'sum', 'prod']: - normal_result = getattr(normal_grouped, func)() - dt_result = getattr(dt_grouped, func)() - pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], index=[3], - columns=['A', 'B', 'C', 'D']) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - assert_frame_equal(expected, dt_result) + normal_result = getattr(normal_grouped, func)() + dt_result = getattr(dt_grouped, func)() - for func 
in ['count']: - normal_result = getattr(normal_grouped, func)() - pad = DataFrame([[0, 0, 0, 0]], index=[3], - columns=['A', 'B', 'C', 'D']) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_frame_equal(expected, dt_result) + pad = DataFrame([[fill_value] * 4], index=[3], + columns=['A', 'B', 'C', 'D']) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + assert_frame_equal(expected, dt_result) + assert dt_result.index.name == 'key' - for func in ['size']: - normal_result = getattr(normal_grouped, func)() - pad = Series([0], index=[3]) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_series_equal(expected, dt_result) - # GH 9925 - assert dt_result.index.name == 'key' + def test_aggregate_with_nat_size(self): + # GH 9925 + n = 20 + data = np.random.randn(n, 4).astype('int64') + normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + normal_df['key'] = [1, 2, np.nan, 4, 5] * 4 + + dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT, + datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4 + + normal_grouped = normal_df.groupby('key') + dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) + + normal_result = normal_grouped.size() + dt_result = dt_grouped.size() + + pad = Series([0], index=[3]) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + assert_series_equal(expected, dt_result) + assert dt_result.index.name == 'key' + + def test_repr(self): + # GH18203 + 
result = repr(TimeGrouper(key='A', freq='H')) + expected = ("TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, " + "closed='left', label='left', how='mean', " + "convention='e', base=0)") + assert result == expected + + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_upsample_sum(self, method, unit): + s = pd.Series(1, index=pd.date_range("2017", periods=2, freq="H")) + resampled = s.resample("30T") + index = pd.to_datetime(['2017-01-01T00:00:00', + '2017-01-01T00:30:00', + '2017-01-01T01:00:00']) + + # 0 / 1 by default + result = methodcaller(method)(resampled) + expected = pd.Series([1, unit, 1], index=index) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = methodcaller(method, min_count=0)(resampled) + expected = pd.Series([1, unit, 1], index=index) + tm.assert_series_equal(result, expected) - # if NaT is included, 'var', 'std', 'mean', 'first','last' - # and 'nth' doesn't work yet + # min_count=1 + result = methodcaller(method, min_count=1)(resampled) + expected = pd.Series([1, np.nan, 1], index=index) + tm.assert_series_equal(result, expected) + + # min_count>1 + result = methodcaller(method, min_count=2)(resampled) + expected = pd.Series([np.nan, np.nan, np.nan], index=index) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index a5b12bbf9608a..06c1fa1c0905a 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -332,16 +332,17 @@ def testit(label_list, shape): label_list2 = decons_group_index(group_index, shape) for a, b in zip(label_list, label_list2): - assert (np.array_equal(a, b)) + tm.assert_numpy_array_equal(a, b) shape = (4, 5, 6) - label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100), np.tile( - [0, 2, 4, 3, 0, 1, 2, 3], 100), np.tile( - [5, 1, 0, 2, 3, 0, 5, 4], 100)] + label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([0, 2, 4, 3, 0, 1, 2, 3], 
100).astype(np.int64), + np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64)] testit(label_list, shape) shape = (10000, 10000) - label_list = [np.tile(np.arange(10000), 5), np.tile(np.arange(10000), 5)] + label_list = [np.tile(np.arange(10000, dtype=np.int64), 5), + np.tile(np.arange(10000, dtype=np.int64), 5)] testit(label_list, shape) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index f1b97081b6d93..8aa69bcbfdf7f 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -2086,6 +2086,18 @@ def test_rsplit_to_multiindex_expand(self): tm.assert_index_equal(result, exp) assert result.nlevels == 2 + def test_split_nan_expand(self): + # gh-18450 + s = Series(["foo,bar,baz", NA]) + result = s.str.split(",", expand=True) + exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]]) + tm.assert_frame_equal(result, exp) + + # check that these are actually np.nan and not None + # TODO see GH 18463 + # tm.assert_frame_equal does not differentiate + assert all(np.isnan(x) for x in result.iloc[1]) + def test_split_with_name(self): # GH 12617 diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index c567613acebd1..e65de10c51300 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -475,6 +475,28 @@ def tests_empty_df_rolling(self, roller): result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum() tm.assert_frame_equal(result, expected) + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = pd.Series([np.nan]) + result = x.rolling(1, min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.rolling(1, min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) + + def test_missing_minp_zero_variable(self): + # https://github.com/pandas-dev/pandas/pull/18921 + x = pd.Series([np.nan] * 4, + index=pd.DatetimeIndex(['2017-01-01', 
'2017-01-04', + '2017-01-06', '2017-01-07'])) + result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum() + expected = pd.Series(0.0, index=x.index) + tm.assert_series_equal(result, expected) + def test_multi_index_names(self): # GH 16789, 16825 @@ -548,6 +570,19 @@ def test_empty_df_expanding(self, expander): index=pd.DatetimeIndex([])).expanding(expander).sum() tm.assert_frame_equal(result, expected) + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = pd.Series([np.nan]) + result = x.expanding(min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.expanding(min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) + class TestEWM(Base): @@ -864,7 +899,8 @@ def test_centered_axis_validation(self): .rolling(window=3, center=True, axis=2).mean()) def test_rolling_sum(self): - self._check_moment_func(mom.rolling_sum, np.sum, name='sum') + self._check_moment_func(mom.rolling_sum, np.nansum, name='sum', + zero_min_periods_equal=False) def test_rolling_count(self): counter = lambda x: np.isfinite(x).astype(float).sum() @@ -1349,14 +1385,18 @@ def test_fperr_robustness(self): def _check_moment_func(self, f, static_comp, name=None, window=50, has_min_periods=True, has_center=True, has_time_rule=True, preserve_nan=True, - fill_value=None, test_stable=False, **kwargs): + fill_value=None, test_stable=False, + zero_min_periods_equal=True, + **kwargs): with warnings.catch_warnings(record=True): self._check_ndarray(f, static_comp, window=window, has_min_periods=has_min_periods, preserve_nan=preserve_nan, has_center=has_center, fill_value=fill_value, - test_stable=test_stable, **kwargs) + test_stable=test_stable, + zero_min_periods_equal=zero_min_periods_equal, + **kwargs) with warnings.catch_warnings(record=True): self._check_structures(f, static_comp, @@ -1375,7 +1415,8 @@ def _check_moment_func(self, f, static_comp, 
name=None, window=50, def _check_ndarray(self, f, static_comp, window=50, has_min_periods=True, preserve_nan=True, has_center=True, fill_value=None, - test_stable=False, test_window=True, **kwargs): + test_stable=False, test_window=True, + zero_min_periods_equal=True, **kwargs): def get_result(arr, window, min_periods=None, center=False): return f(arr, window, min_periods=min_periods, center=center, ** kwargs) @@ -1408,10 +1449,11 @@ def get_result(arr, window, min_periods=None, center=False): assert isna(result[3]) assert notna(result[4]) - # min_periods=0 - result0 = get_result(arr, 20, min_periods=0) - result1 = get_result(arr, 20, min_periods=1) - tm.assert_almost_equal(result0, result1) + if zero_min_periods_equal: + # min_periods=0 may be equivalent to min_periods=1 + result0 = get_result(arr, 20, min_periods=0) + result1 = get_result(arr, 20, min_periods=1) + tm.assert_almost_equal(result0, result1) else: result = get_result(arr, 50) tm.assert_almost_equal(result[-1], static_comp(arr[10:-10])) @@ -2491,6 +2533,14 @@ def test_rolling_corr_pairwise(self): self._check_pairwise_moment('rolling', 'corr', window=10, min_periods=5) + @pytest.mark.parametrize('window', range(7)) + def test_rolling_corr_with_zero_variance(self, window): + # GH 18430 + s = pd.Series(np.zeros(20)) + other = pd.Series(np.arange(20)) + + assert s.rolling(window=window).corr(other=other).isna().all() + def _check_pairwise_moment(self, dispatch, name, **kwargs): def get_result(obj, obj2=None): return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2) @@ -2979,6 +3029,16 @@ def test_rolling_kurt_edge_cases(self): x = d.rolling(window=4).kurt() tm.assert_series_equal(expected, x) + def test_rolling_skew_eq_value_fperr(self): + # #18804 all rolling skew for all equal values should return Nan + a = pd.Series([1.1] * 15).rolling(window=10).skew() + assert np.isnan(a).all() + + def test_rolling_kurt_eq_value_fperr(self): + # #18804 all rolling kurt for all equal values should return Nan + a = 
pd.Series([1.1] * 15).rolling(window=10).kurt() + assert np.isnan(a).all() + def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True, has_time_rule=True, preserve_nan=True): result = func(self.arr) diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index aa8fe90ea6500..823e22c4f87d1 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -13,7 +13,7 @@ import pandas.util.testing as tm import pandas.tseries.offsets as offsets -from pandas.compat import lrange, zip +from pandas.compat import lrange, zip, PY3 from pandas.core.indexes.datetimes import bdate_range, date_range from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas._libs import tslib @@ -70,7 +70,7 @@ def test_utc_to_local_no_modify(self): rng_eastern = rng.tz_convert(self.tzstr('US/Eastern')) # Values are unmodified - assert np.array_equal(rng.asi8, rng_eastern.asi8) + tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8) assert self.cmptz(rng_eastern.tz, self.tz('US/Eastern')) @@ -108,7 +108,7 @@ def test_localize_utc_conversion_explicit(self): rng = date_range('3/10/2012', '3/11/2012', freq='30T') converted = rng.tz_localize(self.tz('US/Eastern')) expected_naive = rng + offsets.Hour(5) - assert np.array_equal(converted.asi8, expected_naive.asi8) + tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) # DST ambiguity, this should fail rng = date_range('3/11/2012', '3/12/2012', freq='30T') @@ -424,7 +424,7 @@ def test_with_tz(self): # datetimes with tzinfo set dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), - '1/1/2009', tz=pytz.utc) + datetime(2009, 1, 1, tzinfo=pytz.utc)) pytest.raises(Exception, bdate_range, datetime(2005, 1, 1, tzinfo=pytz.utc), '1/1/2009', @@ -1278,16 +1278,22 @@ def test_replace_tzinfo(self): result_dt = dt.replace(tzinfo=tzinfo) result_pd = Timestamp(dt).replace(tzinfo=tzinfo) - if hasattr(result_dt, 'timestamp'): # New method in Py 3.3 - assert 
result_dt.timestamp() == result_pd.timestamp() + if PY3: + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + assert result_dt.timestamp() == result_pd.timestamp() + assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime() result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None) result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None) - if hasattr(result_dt, 'timestamp'): # New method in Py 3.3 - assert result_dt.timestamp() == result_pd.timestamp() + if PY3: + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + assert result_dt.timestamp() == result_pd.timestamp() + assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime() diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py index df603c4d880d8..26d3f3cb85edc 100644 --- a/pandas/tseries/converter.py +++ b/pandas/tseries/converter.py @@ -1,6 +1,7 @@ # flake8: noqa +import warnings -from pandas.plotting._converter import (register, time2num, +from pandas.plotting._converter import (time2num, TimeConverter, TimeFormatter, PeriodConverter, get_datevalue, DatetimeConverter, @@ -9,3 +10,11 @@ MilliSecondLocator, get_finder, TimeSeries_DateLocator, TimeSeries_DateFormatter) + + +def register(): + from pandas.plotting._converter import register as register_ + msg = ("'pandas.tseries.converter.register' has been moved and renamed to " + "'pandas.plotting.register_matplotlib_converters'. 
") + warnings.warn(msg, FutureWarning, stacklevel=2) + register_() diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 730d2782e85d2..b6fc9c78d6476 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1074,8 +1074,12 @@ def assert_categorical_equal(left, right, check_dtype=True, def raise_assert_detail(obj, message, left, right, diff=None): if isinstance(left, np.ndarray): left = pprint_thing(left) + elif is_categorical_dtype(left): + left = repr(left) if isinstance(right, np.ndarray): right = pprint_thing(right) + elif is_categorical_dtype(right): + right = repr(right) msg = """{obj} are different @@ -2857,3 +2861,31 @@ def setTZ(tz): yield finally: setTZ(orig_tz) + + +def _make_skipna_wrapper(alternative, skipna_alternative=None): + """Create a function for calling on an array. + + Parameters + ---------- + alternative : function + The function to be called on the array with no NaNs. + Only used when 'skipna_alternative' is None. + skipna_alternative : function + The function to be called on the original array + + Returns + ------- + skipna_wrapper : function + """ + if skipna_alternative: + def skipna_wrapper(x): + return skipna_alternative(x.values) + else: + def skipna_wrapper(x): + nona = x.dropna() + if len(nona) == 0: + return np.nan + return alternative(nona) + + return skipna_wrapper diff --git a/scripts/convert_deps.py b/scripts/convert_deps.py new file mode 100644 index 0000000000000..aabeb24a0c3c8 --- /dev/null +++ b/scripts/convert_deps.py @@ -0,0 +1,29 @@ +""" +Convert the conda environment.yaml to a pip requirements.txt +""" +import yaml + +exclude = {'python=3'} +rename = {'pytables': 'tables'} + +with open("ci/environment-dev.yaml") as f: + dev = yaml.load(f) + +with open("ci/requirements-optional-conda.txt") as f: + optional = [x.strip() for x in f.readlines()] + +required = dev['dependencies'] +required = [rename.get(dep, dep) for dep in required if dep not in exclude] +optional = [rename.get(dep, dep) for dep in 
optional if dep not in exclude] + + +with open("ci/requirements_dev.txt", 'wt') as f: + f.write("# This file was autogenerated by scripts/convert_deps.py\n") + f.write("# Do not modify directly\n") + f.write('\n'.join(required)) + + +with open("ci/requirements-optional-pip.txt", 'wt') as f: + f.write("# This file was autogenerated by scripts/convert_deps.py\n") + f.write("# Do not modify directly\n") + f.write("\n".join(optional)) diff --git a/setup.cfg b/setup.cfg index 0123078523b6f..7a88ee8557dc7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,7 +12,7 @@ tag_prefix = v parentdir_prefix = pandas- [flake8] -ignore = E731,E402 +ignore = E731,E402,W503 max-line-length = 79 [yapf] diff --git a/setup.py b/setup.py index 158ee9493b6ac..0fea6f5641475 100755 --- a/setup.py +++ b/setup.py @@ -716,6 +716,7 @@ def pxd(name): 'parser/data/*.bz2', 'parser/data/*.txt', 'parser/data/*.tar', + 'parser/data/*.zip', 'parser/data/*.tar.gz', 'sas/data/*.csv', 'sas/data/*.xpt',
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21059
2018-05-15T15:24:48Z
2018-05-15T16:44:13Z
null
2018-05-15T16:44:13Z
Fixed space in small info
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0437c479c9d81..dccc840f5affd 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2231,7 +2231,7 @@ def _sizeof_fmt(num, size_qualifier): # returns size in human readable format for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: - return ("{num:3.1f}{size_q}" + return ("{num:3.1f}{size_q} " "{x}".format(num=num, size_q=size_qualifier, x=x)) num /= 1024.0 return "{num:3.1f}{size_q} {pb}".format(num=num, diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 8fc6fef11798a..668613c494a47 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -5,6 +5,7 @@ from datetime import datetime, timedelta import re import sys +import textwrap from numpy import nan import numpy as np @@ -204,6 +205,25 @@ def test_info(self): frame.info() frame.info(verbose=False) + def test_info_memory(self): + # https://github.com/pandas-dev/pandas/issues/21056 + df = pd.DataFrame({'a': pd.Series([1, 2], dtype='i8')}) + buf = StringIO() + df.info(buf=buf) + result = buf.getvalue() + bytes = float(df.memory_usage().sum()) + + expected = textwrap.dedent("""\ + <class 'pandas.core.frame.DataFrame'> + RangeIndex: 2 entries, 0 to 1 + Data columns (total 1 columns): + a 2 non-null int64 + dtypes: int64(1) + memory usage: {} bytes + """.format(bytes)) + + assert result == expected + def test_info_wide(self): from pandas import set_option, reset_option io = StringIO()
Closes https://github.com/pandas-dev/pandas/issues/21056 ping @jreback @jorisvandenbossche this should be the last failure that was picked up by dask's tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/21057
2018-05-15T14:54:28Z
2018-05-15T19:04:11Z
2018-05-15T19:04:10Z
2018-05-15T21:36:26Z
BUG: Keep original name in str.cat
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index d40ff02fd0285..81d775157cf62 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -2320,9 +2320,9 @@ def cat(self, others=None, sep=None, na_rep=None, join=None): res = str_cat(data, others=others, sep=sep, na_rep=na_rep) if isinstance(self._orig, Index): - res = Index(res) + res = Index(res, name=self._orig.name) else: # Series - res = Series(res, index=data.index) + res = Series(res, index=data.index, name=self._orig.name) return res @copy(str_split) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 1a978cbf6363f..9d008dfd25c90 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -144,6 +144,19 @@ def test_cat(self): with tm.assert_raises_regex(ValueError, rgx): strings.str_cat(one, 'three') + @pytest.mark.parametrize('container', [Series, Index]) + @pytest.mark.parametrize('other', [None, Series, Index]) + def test_str_cat_name(self, container, other): + # https://github.com/pandas-dev/pandas/issues/21053 + values = ['a', 'b'] + if other: + other = other(values) + else: + other = values + result = container(values, name='name').str.cat(other, sep=',', + join='left') + assert result.name == 'name' + @pytest.mark.parametrize('series_or_index', ['series', 'index']) def test_str_cat(self, series_or_index): # test_cat above tests "str_cat" from ndarray to ndarray;
Closes https://github.com/pandas-dev/pandas/issues/21053 cc @jreback @jorisvandenbossche @h-vetinari for a quick check if you have a chance.
https://api.github.com/repos/pandas-dev/pandas/pulls/21054
2018-05-15T13:51:20Z
2018-05-15T15:07:43Z
2018-05-15T15:07:42Z
2018-05-15T15:09:43Z
DOC: Updated release and whatsnew for 0.23.0
diff --git a/doc/source/release.rst b/doc/source/release.rst index 709c9b15b55f7..5fe397a7cbb37 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -37,6 +37,361 @@ analysis / manipulation tool available in any language. * Binary installers on PyPI: https://pypi.org/project/pandas * Documentation: http://pandas.pydata.org +pandas 0.23.0 +------------- + +**Release date**: May 15, 2017 + +This is a major release from 0.23.0 and includes a number of API changes, new +features, enhancements, and performance improvements along with a large number +of bug fixes. We recommend that all users upgrade to this version. + +Highlights include: + +- :ref:`Round-trippable JSON format with 'table' orient <whatsnew_0230.enhancements.round-trippable_json>`. +- :ref:`Instantiation from dicts respects order for Python 3.6+ <whatsnew_0230.api_breaking.dict_insertion_order>`. +- :ref:`Dependent column arguments for assign <whatsnew_0230.enhancements.assign_dependent>`. +- :ref:`Merging / sorting on a combination of columns and index levels <whatsnew_0230.enhancements.merge_on_columns_and_levels>`. +- :ref:`Extending Pandas with custom types <whatsnew_023.enhancements.extension>`. +- :ref:`Excluding unobserved categories from groupby <whatsnew_0230.enhancements.categorical_grouping>`. + +See the :ref:`full whatsnew <whatsnew_0230>` for a list of all the changes. + +Thanks +~~~~~~ + +A total of 328 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aaron Critchley +* AbdealiJK + +* Adam Hooper + +* Albert Villanova del Moral +* Alejandro Giacometti + +* Alejandro Hohmann + +* Alex Rychyk +* Alexander Buchkovsky +* Alexander Lenail + +* Alexander Michael Schade +* Aly Sivji + +* Andreas Költringer + +* Andrew +* Andrew Bui + +* András Novoszáth + +* Andy Craze + +* Andy R. 
Terrel +* Anh Le + +* Anil Kumar Pallekonda + +* Antoine Pitrou + +* Antonio Linde + +* Antonio Molina + +* Antonio Quinonez + +* Armin Varshokar + +* Artem Bogachev + +* Avi Sen + +* Azeez Oluwafemi + +* Ben Auffarth + +* Bernhard Thiel + +* Bhavesh Poddar + +* BielStela + +* Blair + +* Bob Haffner +* Brett Naul + +* Brock Mendel +* Bryce Guinta + +* Carlos Eduardo Moreira dos Santos + +* Carlos García Márquez + +* Carol Willing +* Cheuk Ting Ho + +* Chitrank Dixit + +* Chris +* Chris Burr + +* Chris Catalfo + +* Chris Mazzullo +* Christian Chwala + +* Cihan Ceyhan + +* Clemens Brunner +* Colin + +* Cornelius Riemenschneider +* Crystal Gong + +* DaanVanHauwermeiren +* Dan Dixey + +* Daniel Frank + +* Daniel Garrido + +* Daniel Sakuma + +* DataOmbudsman + +* Dave Hirschfeld +* Dave Lewis + +* David Adrián Cañones Castellano + +* David Arcos + +* David C Hall + +* David Fischer +* David Hoese + +* David Lutz + +* David Polo + +* David Stansby +* Dennis Kamau + +* Dillon Niederhut +* Dimitri + +* Dr. Irv +* Dror Atariah +* Eric Chea + +* Eric Kisslinger +* Eric O. 
LEBIGOT (EOL) + +* FAN-GOD + +* Fabian Retkowski + +* Fer Sar + +* Gabriel de Maeztu + +* Gianpaolo Macario + +* Giftlin Rajaiah +* Gilberto Olimpio + +* Gina + +* Gjelt + +* Graham Inggs + +* Grant Roch +* Grant Smith + +* Grzegorz Konefał + +* Guilherme Beltramini +* HagaiHargil + +* Hamish Pitkeathly + +* Hammad Mashkoor + +* Hannah Ferchland + +* Hans +* Haochen Wu + +* Hissashi Rocha + +* Iain Barr + +* Ibrahim Sharaf ElDen + +* Ignasi Fosch + +* Igor Conrado Alves de Lima + +* Igor Shelvinskyi + +* Imanflow + +* Ingolf Becker +* Israel Saeta Pérez +* Iva Koevska + +* Jakub Nowacki + +* Jan F-F + +* Jan Koch + +* Jan Werkmann +* Janelle Zoutkamp + +* Jason Bandlow + +* Jaume Bonet + +* Jay Alammar + +* Jeff Reback +* JennaVergeynst +* Jimmy Woo + +* Jing Qiang Goh + +* Joachim Wagner + +* Joan Martin Miralles + +* Joel Nothman +* Joeun Park + +* John Cant + +* Johnny Metz + +* Jon Mease +* Jonas Schulze + +* Jongwony + +* Jordi Contestí + +* Joris Van den Bossche +* José F. R. Fonseca + +* Jovixe + +* Julio Martinez + +* Jörg Döpfert +* KOBAYASHI Ittoku + +* Kate Surta + +* Kenneth + +* Kevin Kuhl +* Kevin Sheppard +* Krzysztof Chomski +* Ksenia + +* Ksenia Bobrova + +* Kunal Gosar + +* Kurtis Kerstein + +* Kyle Barron + +* Laksh Arora + +* Laurens Geffert + +* Leif Walsh +* Liam Marshall + +* Liam3851 + +* Licht Takeuchi +* Liudmila + +* Ludovico Russo + +* Mabel Villalba + +* Manan Pal Singh + +* Manraj Singh +* Marc + +* Marc Garcia +* Marco Hemken + +* Maria del Mar Bibiloni + +* Mario Corchero + +* Mark Woodbridge + +* Martin Journois + +* Mason Gallo + +* Matias Heikkilä + +* Matt Braymer-Hayes +* Matt Kirk + +* Matt Maybeno + +* Matthew Kirk + +* Matthew Rocklin + +* Matthew Roeschke +* Matthias Bussonnier + +* Max Mikhaylov + +* Maxim Veksler + +* Maximilian Roos +* Maximiliano Greco + +* Michael Penkov +* Michael Röttger + +* Michael Selik + +* Michael Waskom +* Mie~~~ +* Mike Kutzma + +* Ming Li + +* Mitar + +* Mitch Negus + +* Montana Low + +* 
Moritz Münst + +* Mortada Mehyar +* Myles Braithwaite + +* Nate Yoder +* Nicholas Ursa + +* Nick Chmura +* Nikos Karagiannakis + +* Nipun Sadvilkar + +* Nis Martensen + +* Noah + +* Noémi Éltető + +* Olivier Bilodeau + +* Ondrej Kokes + +* Onno Eberhard + +* Paul Ganssle + +* Paul Mannino + +* Paul Reidy +* Paulo Roberto de Oliveira Castro + +* Pepe Flores + +* Peter Hoffmann +* Phil Ngo + +* Pietro Battiston +* Pranav Suri + +* Priyanka Ojha + +* Pulkit Maloo + +* README Bot + +* Ray Bell + +* Riccardo Magliocchetti + +* Ridhwan Luthra + +* Robert Meyer +* Robin +* Robin Kiplang'at + +* Rohan Pandit + +* Rok Mihevc + +* Rouz Azari +* Ryszard T. Kaleta + +* Sam Cohan +* Sam Foo +* Samir Musali + +* Samuel Sinayoko + +* Sangwoong Yoon +* SarahJessica + +* Sharad Vijalapuram + +* Shubham Chaudhary + +* SiYoungOh + +* Sietse Brouwer +* Simone Basso + +* Stefania Delprete + +* Stefano Cianciulli + +* Stephen Childs + +* StephenVoland + +* Stijn Van Hoey + +* Sven +* Talitha Pumar + +* Tarbo Fukazawa + +* Ted Petrou + +* Thomas A Caswell +* Tim Hoffmann + +* Tim Swast +* Tom Augspurger +* Tommy + +* Tulio Casagrande + +* Tushar Gupta + +* Tushar Mittal + +* Upkar Lidder + +* Victor Villas + +* Vince W + +* Vinícius Figueiredo + +* Vipin Kumar + +* WBare +* Wenhuan + +* Wes Turner +* William Ayd +* Wilson Lin + +* Xbar +* Yaroslav Halchenko +* Yee Mey +* Yeongseon Choe + +* Yian + +* Yimeng Zhang +* ZhuBaohe + +* Zihao Zhao + +* adatasetaday + +* akielbowicz + +* akosel + +* alinde1 + +* amuta + +* bolkedebruin +* cbertinato +* cgohlke +* charlie0389 + +* chris-b1 +* csfarkas + +* dajcs + +* deflatSOCO + +* derestle-htwg +* discort +* dmanikowski-reef + +* donK23 + +* elrubio + +* fivemok + +* fjdiod +* fjetter + +* froessler + +* gabrielclow +* gfyoung +* ghasemnaddaf +* h-vetinari + +* himanshu awasthi + +* ignamv + +* jayfoad + +* jazzmuesli + +* jbrockmendel +* jen w + +* jjames34 + +* joaoavf + +* joders + +* jschendel +* juan huguet + +* l736x + +* luzpaz + +* 
mdeboc + +* miguelmorin + +* miker985 +* miquelcamprodon + +* orereta + +* ottiP + +* peterpanmj + +* rafarui + +* raph-m + +* readyready15728 + +* rmihael + +* samghelms + +* scriptomation + +* sfoo + +* stefansimik + +* stonebig +* tmnhat2001 + +* tomneep + +* topper-123 +* tv3141 + +* verakai + +* xpvpc + +* zhanghui + + pandas 0.22.0 ------------- diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 005b2377ed61b..89dab728d2bd4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1,7 +1,7 @@ .. _whatsnew_0230: -v0.23.0 -------- +v0.23.0 (May 15, 2017) +---------------------- This is a major release from 0.22.0 and includes a number of API changes, deprecations, new features, enhancements, and performance improvements along @@ -319,6 +319,7 @@ The method has now gained a keyword ``join`` to control the manner of alignment, In v.0.23 `join` will default to None (meaning no alignment), but this default will change to ``'left'`` in a future version of pandas. .. ipython:: python + :okwarning: s = pd.Series(['a', 'b', 'c', 'd']) t = pd.Series(['b', 'd', 'e', 'c'], index=[1, 3, 4, 2]) @@ -613,6 +614,11 @@ Deprecate Panel with a ``MultiIndex`` on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas provides a :meth:`~Panel.to_xarray` method to automate this conversion. For more details see :ref:`Deprecate Panel <dsintro.deprecate_panel>` documentation. (:issue:`13563`, :issue:`18324`). +.. ipython:: python + :suppress: + + import pandas.util.testing as tm + .. ipython:: python :okwarning:
https://api.github.com/repos/pandas-dev/pandas/pulls/21051
2018-05-15T13:20:02Z
2018-05-15T13:49:18Z
2018-05-15T13:49:18Z
2018-05-15T16:21:41Z
DOC: Fix find_undoc_args script to use getfullargspec()
diff --git a/scripts/find_undoc_args.py b/scripts/find_undoc_args.py index a135c8e5171a1..c4d846ee3c3e5 100755 --- a/scripts/find_undoc_args.py +++ b/scripts/find_undoc_args.py @@ -62,7 +62,7 @@ def build_loc(f): path = f.__code__.co_filename.split(args.path, 1)[-1][1:] return dict(path=path, lnum=f.__code__.co_firstlineno) - sig_names = set(inspect.getargspec(f).args) + sig_names = set(inspect.getfullargspec(f).args) # XXX numpydoc can be used to get the list of parameters doc = f.__doc__.lower() doc = re.split('^\s*parameters\s*', doc, 1, re.M)[-1]
Use [getfullargspec](https://docs.python.org/3/library/inspect.html#inspect.getfullargspec) instead of [getargspec](https://docs.python.org/3/library/inspect.html#inspect.getargspec) to fix the find_undoc_args.py script
https://api.github.com/repos/pandas-dev/pandas/pulls/21046
2018-05-15T06:03:16Z
2018-05-17T06:39:15Z
null
2018-05-17T06:39:15Z
Fix Inconsistent MultiIndex Sorting
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index dccc840f5affd..2b65af4b368a9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4454,7 +4454,10 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, axis = self._get_axis_number(axis) labels = self._get_axis(axis) - if level: + # make sure that the axis is lexsorted to start + # if not we need to reconstruct to get the correct indexer + labels = labels._sort_levels_monotonic() + if level is not None: new_axis, indexer = labels.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) @@ -4462,9 +4465,6 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, elif isinstance(labels, MultiIndex): from pandas.core.sorting import lexsort_indexer - # make sure that the axis is lexsorted to start - # if not we need to reconstruct to get the correct indexer - labels = labels._sort_levels_monotonic() indexer = lexsort_indexer(labels._get_labels_for_sorting(), orders=ascending, na_position=na_position) diff --git a/pandas/core/series.py b/pandas/core/series.py index 0e2ae22f35af7..622fa2c226134 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2616,7 +2616,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, axis = self._get_axis_number(axis) index = self.index - if level: + if level is not None: new_index, indexer = index.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) elif isinstance(index, MultiIndex): diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index d89731dc09044..d05321abefca6 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -861,6 +861,23 @@ def test_stack_preserve_categorical_dtype(self): tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("level", [0, 'baz']) + def test_unstack_swaplevel_sortlevel(self, level): + # GH 20994 + mi = pd.MultiIndex.from_product([[0], ['d', 'c']], + 
names=['bar', 'baz']) + df = pd.DataFrame([[0, 2], [1, 3]], index=mi, columns=['B', 'A']) + df.columns.name = 'foo' + + expected = pd.DataFrame([ + [3, 1, 2, 0]], columns=pd.MultiIndex.from_tuples([ + ('c', 'A'), ('c', 'B'), ('d', 'A'), ('d', 'B')], names=[ + 'baz', 'foo'])) + expected.index.name = 'bar' + + result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level) + tm.assert_frame_equal(result, expected) + def test_unstack_fill_frame_object(): # GH12815 Test unstacking with object. diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py index b60eb89e87da5..599ae683f914b 100644 --- a/pandas/tests/frame/test_sorting.py +++ b/pandas/tests/frame/test_sorting.py @@ -550,18 +550,36 @@ def test_sort_index(self): expected = frame.iloc[:, ::-1] assert_frame_equal(result, expected) - def test_sort_index_multiindex(self): + @pytest.mark.parametrize("level", ['A', 0]) # GH 21052 + def test_sort_index_multiindex(self, level): # GH13496 # sort rows by specified level of multi-index - mi = MultiIndex.from_tuples([[2, 1, 3], [1, 1, 1]], names=list('ABC')) - df = DataFrame([[1, 2], [3, 4]], mi) + mi = MultiIndex.from_tuples([ + [2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list('ABC')) + df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi) + + expected_mi = MultiIndex.from_tuples([ + [1, 1, 1], + [2, 1, 2], + [2, 1, 3]], names=list('ABC')) + expected = pd.DataFrame([ + [5, 6], + [3, 4], + [1, 2]], index=expected_mi) + result = df.sort_index(level=level) + assert_frame_equal(result, expected) - # MI sort, but no level: sort_level has no effect - mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) - df = DataFrame([[1, 2], [3, 4]], mi) - result = df.sort_index(sort_remaining=False) - expected = df.sort_index() + # sort_remaining=False + expected_mi = MultiIndex.from_tuples([ + [1, 1, 1], + [2, 1, 3], + [2, 1, 2]], names=list('ABC')) + expected = pd.DataFrame([ + [5, 6], + [1, 2], + [3, 4]], index=expected_mi) + result = 
df.sort_index(level=level, sort_remaining=False) assert_frame_equal(result, expected) def test_sort_index_intervalindex(self): diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py index 01b4ea6eaa238..13e0d1b12c372 100644 --- a/pandas/tests/series/test_sorting.py +++ b/pandas/tests/series/test_sorting.py @@ -141,19 +141,20 @@ def test_sort_index_inplace(self): assert result is None tm.assert_series_equal(random_order, self.ts) - def test_sort_index_multiindex(self): + @pytest.mark.parametrize("level", ['A', 0]) # GH 21052 + def test_sort_index_multiindex(self, level): mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) s = Series([1, 2], mi) backwards = s.iloc[[1, 0]] # implicit sort_remaining=True - res = s.sort_index(level='A') + res = s.sort_index(level=level) assert_series_equal(backwards, res) # GH13496 - # rows share same level='A': sort has no effect without remaining lvls - res = s.sort_index(level='A', sort_remaining=False) + # sort has no effect without remaining lvls + res = s.sort_index(level=level, sort_remaining=False) assert_series_equal(s, res) def test_sort_index_kind(self):
closes #20994 closes #20945 closes #21052 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This is a pretty dark corner and I'll admit that I don't fully understand all of the elements in play. That said, the first problem I noticed with the first referenced issue was an errant conditional that was causing `level=0` and `level='foo'` to go down two different branches, even if the name of the first level was in fact 'foo'. After uncovering that, the subsequent ordering of the returned index then was incorrect regardless of the argument. I moved a monotonic sort around to fix this, though I feel like: - There may be a more general purpose solution to fix this AND - There may be more expressive tests to add here Feedback appreciated
https://api.github.com/repos/pandas-dev/pandas/pulls/21043
2018-05-15T02:10:13Z
2018-05-19T20:15:04Z
2018-05-19T20:15:03Z
2018-06-05T09:04:41Z
CI: Fixed linting on download_wheels
diff --git a/scripts/download_wheels.py b/scripts/download_wheels.py index a4705d0e4e63c..f5cdbbe36d90d 100644 --- a/scripts/download_wheels.py +++ b/scripts/download_wheels.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python """Fetch wheels from wheels.scipy.org for a pandas version.""" import argparse import pathlib @@ -23,7 +24,7 @@ def fetch(version): dest.mkdir(exist_ok=True) files = [x for x in root.xpath("//a/text()") - if x.startswith(f'pandas-{version}') + if x.startswith('pandas-{}'.format(version)) and not dest.joinpath(x).exists()] N = len(files) @@ -32,7 +33,9 @@ def fetch(version): out = str(dest.joinpath(filename)) link = urllib.request.urljoin(base, filename) urllib.request.urlretrieve(link, out) - print(f"Downloaded {link} to {out} [{i}/{N}]") + print("Downloaded {link} to {out} [{i}/{N}]".format( + link=link, out=out, i=i, N=N + )) def main(args=None):
xref https://github.com/pandas-dev/pandas/pull/20928#pullrequestreview-120060546
https://api.github.com/repos/pandas-dev/pandas/pulls/21042
2018-05-15T01:34:05Z
2018-05-15T10:00:21Z
2018-05-15T10:00:21Z
2018-05-15T10:00:24Z
Docstring shift
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 38f334762fa88..84595153103c7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8012,12 +8012,12 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, errors=errors) _shared_docs['shift'] = (""" - Shift index by desired number of periods with an optional time freq + Shift index by desired number of periods with an optional time freq. Parameters ---------- periods : int - Number of periods to move, can be positive or negative + Number of periods to move, can be positive or negative. freq : DateOffset, timedelta, or time rule string, optional Increment to use from the tseries module or time rule (e.g. 'EOM'). See Notes. @@ -8029,6 +8029,91 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, is not realigned. That is, use freq if you would like to extend the index when shifting and preserve the original data. + Examples + -------- + + >>> df = pd.DataFrame({'group': ['A', 'A', 'A', 'B', 'B', 'B'], + ... 'myvalue': [1, 2, 3, 4, 5, 6]}, + ... index=pd.DatetimeIndex(['2016-06-06', + ... '2016-06-08', + ... '2016-06-09', + ... '2016-06-10', + ... '2016-06-12', + ... '2016-06-13'], + ... name='mydate')) + + >>> df + group myvalue + mydate + 2016-06-06 A 1 + 2016-06-08 A 2 + 2016-06-09 A 3 + 2016-06-10 B 4 + 2016-06-12 B 5 + 2016-06-13 B 6 + + For the groups compute the difference between current `myvalue` and + `myvalue` shifted forward by 1 day. + + If `myvalue` is shifted then the values will simply move down. + + >>> df.myvalue.shift(1) + mydate + 2016-06-06 NaN + 2016-06-08 1.0 + 2016-06-09 2.0 + 2016-06-10 3.0 + 2016-06-12 4.0 + 2016-06-13 5.0 + Name: myvalue, dtype: float64 + + We only want to shift `myvalue` forward by one day before computing + the difference. 
We can do this by reindexing and filling the groups + first + + >>> date_range = pd.date_range(df.index.min(), df.index.max()) + >>> df = df.reindex(date_range) + >>> df['group'] = df['group'].ffill() + >>> df + group myvalue + 2016-06-06 A 1.0 + 2016-06-07 A NaN + 2016-06-08 A 2.0 + 2016-06-09 A 3.0 + 2016-06-10 B 4.0 + 2016-06-11 B NaN + 2016-06-12 B 5.0 + 2016-06-13 B 6.0 + + After considering the grouping we can calculate the difference + as follows + + >>> result = df['myvalue'] - df.groupby('group')['myvalue'].shift(1) + >>> result + 2016-06-06 NaN + 2016-06-07 NaN + 2016-06-08 NaN + 2016-06-09 1.0 + 2016-06-10 NaN + 2016-06-11 NaN + 2016-06-12 NaN + 2016-06-13 1.0 + Freq: D, Name: myvalue, dtype: float64 + + Concatenate result as a column named `delta` to the original data + + >>> result.name = 'delta' + >>> pd.concat([df, result], axis=1) + group myvalue delta + 2016-06-06 A 1.0 NaN + 2016-06-07 A NaN NaN + 2016-06-08 A 2.0 NaN + 2016-06-09 A 3.0 1.0 + 2016-06-10 B 4.0 NaN + 2016-06-11 B NaN NaN + 2016-06-12 B 5.0 NaN + 2016-06-13 B 6.0 1.0 + Returns ------- shifted : %(klass)s
- [x] closes #20492 - [x] tests passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21039
2018-05-14T21:35:24Z
2018-11-03T06:40:11Z
null
2018-11-03T06:40:11Z
DOC: Enhancing pivot / reshape docs
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 7d9925d800441..feb58c1c11dfd 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -17,6 +17,8 @@ Reshaping and Pivot Tables Reshaping by pivoting DataFrame objects --------------------------------------- +.. image:: _static/reshaping_pivot.png + .. ipython:: :suppress: @@ -33,8 +35,7 @@ Reshaping by pivoting DataFrame objects In [3]: df = unpivot(tm.makeTimeDataFrame()) -Data is often stored in CSV files or databases in so-called "stacked" or -"record" format: +Data is often stored in so-called "stacked" or "record" format: .. ipython:: python @@ -60,8 +61,6 @@ To select out everything for variable ``A`` we could do: df[df['variable'] == 'A'] -.. image:: _static/reshaping_pivot.png - But suppose we wish to do time series operations with the variables. A better representation would be where the ``columns`` are the unique variables and an ``index`` of dates identifies individual observations. To reshape the data into @@ -81,7 +80,7 @@ column: .. ipython:: python df['value2'] = df['value'] * 2 - pivoted = df.pivot('date', 'variable') + pivoted = df.pivot(index='date', columns='variable') pivoted You can then select subsets from the pivoted ``DataFrame``: @@ -93,6 +92,12 @@ You can then select subsets from the pivoted ``DataFrame``: Note that this returns a view on the underlying data in the case where the data are homogeneously-typed. +.. note:: + :func:`~pandas.pivot` will error with a ``ValueError: Index contains duplicate + entries, cannot reshape`` if the index/column pair is not unique. In this + case, consider using :func:`~pandas.pivot_table` which is a generalization + of pivot that can handle duplicate values for one index/column pair. + .. 
_reshaping.stacking: Reshaping by stacking and unstacking @@ -698,10 +703,103 @@ handling of NaN: In [3]: np.unique(x, return_inverse=True)[::-1] Out[3]: (array([3, 3, 0, 4, 1, 2]), array([nan, 3.14, inf, 'A', 'B'], dtype=object)) - .. note:: If you just want to handle one column as a categorical variable (like R's factor), you can use ``df["cat_col"] = pd.Categorical(df["col"])`` or ``df["cat_col"] = df["col"].astype("category")``. For full docs on :class:`~pandas.Categorical`, see the :ref:`Categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>`. + +Examples +-------- + +In this section, we will review frequently asked questions and examples. The +column names and relevant column values are named to correspond with how this +DataFrame will be pivoted in the answers below. + +.. ipython:: python + + np.random.seed([3, 1415]) + n = 20 + + cols = np.array(['key', 'row', 'item', 'col']) + df = cols + pd.DataFrame((np.random.randint(5, size=(n, 4)) // [2, 1, 2, 1]).astype(str)) + df.columns = cols + df = df.join(pd.DataFrame(np.random.rand(n, 2).round(2)).add_prefix('val')) + + df + +Pivoting with Single Aggregations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Suppose we wanted to pivot ``df`` such that the ``col`` values are columns, +``row`` values are the index, and the mean of ``val0`` are the values? In +particular, the resulting DataFrame should look like: + +.. code-block:: ipython + + col col0 col1 col2 col3 col4 + row + row0 0.77 0.605 NaN 0.860 0.65 + row2 0.13 NaN 0.395 0.500 0.25 + row3 NaN 0.310 NaN 0.545 NaN + row4 NaN 0.100 0.395 0.760 0.24 + +This solution uses :func:`~pandas.pivot_table`. Also note that +``aggfunc='mean'`` is the default. It is included here to be explicit. + +.. ipython:: python + + df.pivot_table( + values='val0', index='row', columns='col', aggfunc='mean') + +Note that we can also replace the missing values by using the ``fill_value`` +parameter. + +.. 
ipython:: python + + df.pivot_table( + values='val0', index='row', columns='col', aggfunc='mean', fill_value=0) + +Also note that we can pass in other aggregation functions as well. For example, +we can also pass in ``sum``. + +.. ipython:: python + + df.pivot_table( + values='val0', index='row', columns='col', aggfunc='sum', fill_value=0) + +Another aggregation we can do is calculate the frequency in which the columns +and rows occur together a.k.a. "cross tabulation". To do this, we can pass +``size`` to the ``aggfunc`` parameter. + +.. ipython:: python + + df.pivot_table(index='row', columns='col', fill_value=0, aggfunc='size') + +Pivoting with Multiple Aggregations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We can also perform multiple aggregations. For example, to perform both a +``sum`` and ``mean``, we can pass in a list to the ``aggfunc`` argument. + +.. ipython:: python + + df.pivot_table( + values='val0', index='row', columns='col', aggfunc=['mean', 'sum']) + +Note to aggregate over multiple value columns, we can pass in a list to the +``values`` parameter. + +.. ipython:: python + + df.pivot_table( + values=['val0', 'val1'], index='row', columns='col', aggfunc=['mean']) + +Note to subdivide over multiple columns we can pass in a list to the +``columns`` parameter. + +.. ipython:: python + + df.pivot_table( + values=['val0'], index='row', columns=['item', 'col'], aggfunc=['mean']) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7aadf7e735f38..19425cf1a50a1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5503,50 +5503,72 @@ def pivot(self, index=None, columns=None, values=None): ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], - ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]}) + ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + ... 
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}) >>> df - A B C D - 0 foo one small 1 - 1 foo one large 2 - 2 foo one large 2 - 3 foo two small 3 - 4 foo two small 3 - 5 bar one large 4 - 6 bar one small 5 - 7 bar two small 6 - 8 bar two large 7 + A B C D E + 0 foo one small 1 2 + 1 foo one large 2 4 + 2 foo one large 2 5 + 3 foo two small 3 5 + 4 foo two small 3 6 + 5 bar one large 4 6 + 6 bar one small 5 8 + 7 bar two small 6 9 + 8 bar two large 7 9 + + This first example aggregates values by taking the sum. >>> table = pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum) >>> table C large small A B - bar one 4.0 5.0 - two 7.0 6.0 - foo one 4.0 1.0 - two NaN 6.0 + bar one 4 5 + two 7 6 + foo one 4 1 + two NaN 6 + + We can also fill missing values using the `fill_value` parameter. >>> table = pivot_table(df, values='D', index=['A', 'B'], - ... columns=['C'], aggfunc=np.sum) + ... columns=['C'], aggfunc=np.sum, fill_value=0) >>> table C large small A B - bar one 4.0 5.0 - two 7.0 6.0 - foo one 4.0 1.0 - two NaN 6.0 + bar one 4 5 + two 7 6 + foo one 4 1 + two 0 6 + + The next example aggregates by taking the mean across multiple columns. + + >>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'], + ... aggfunc={'D': np.mean, + ... 'E': np.mean}) + >>> table + D E + mean mean + A C + bar large 5.500000 7.500000 + small 5.500000 8.500000 + foo large 2.000000 4.500000 + small 2.333333 4.333333 + + We can also calculate multiple types of aggregations for any given + value column. >>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': [min, max, np.mean]}) >>> table D E - mean max median min + mean max mean min A C - bar large 5.500000 16 14.5 13 - small 5.500000 15 14.5 14 - foo large 2.000000 10 9.5 9 - small 2.333333 12 11.0 8 + bar large 5.500000 9 7.500000 6 + small 5.500000 9 8.500000 8 + foo large 2.000000 5 4.500000 4 + small 2.333333 6 4.333333 2 Returns -------
- [x] closes #19089 - [x] tests added / passed (NA Just Docs) - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Enhancing pivot / reshape docs Added more examples and added Q + A section.
https://api.github.com/repos/pandas-dev/pandas/pulls/21038
2018-05-14T20:13:41Z
2018-11-12T00:21:59Z
2018-11-12T00:21:58Z
2018-11-12T00:22:12Z
Warn on ndarray[int] // timedelta
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index adb4cdf2974a0..73e3e721aad71 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -257,7 +257,7 @@ Pass ``errors='coerce'`` to convert unparseable data to ``NaT`` (not a time): Epoch Timestamps ~~~~~~~~~~~~~~~~ -pandas supports converting integer or float epoch times to ``Timestamp`` and +pandas supports converting integer or float epoch times to ``Timestamp`` and ``DatetimeIndex``. The default unit is nanoseconds, since that is how ``Timestamp`` objects are stored internally. However, epochs are often stored in another ``unit`` which can be specified. These are computed from the starting point specified by the @@ -304,11 +304,12 @@ To invert the operation from above, namely, to convert from a ``Timestamp`` to a stamps = pd.date_range('2012-10-08 18:15:05', periods=4, freq='D') stamps -We convert the ``DatetimeIndex`` to an ``int64`` array, then divide by the conversion unit. +We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by the +"unit" (1 second). .. ipython:: python - stamps.view('int64') // pd.Timedelta(1, unit='s') + (stamps - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s') .. _timeseries.origin: diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1660c8d9fcdc5..dcd7c56b8013f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1004,6 +1004,7 @@ Deprecations of the ``Series`` and ``Index`` classes have been deprecated and will be removed in a future version (:issue:`20419`). - ``DatetimeIndex.offset`` is deprecated. Use ``DatetimeIndex.freq`` instead (:issue:`20716`) +- Floor division between an integer ndarray and a :class:`Timedelta` is deprecated. Divide by :attr:`Timedelta.value` instead (:issue:`19761`) - Setting ``PeriodIndex.freq`` (which was not guaranteed to work correctly) is deprecated. 
Use :meth:`PeriodIndex.asfreq` instead (:issue:`20678`) - ``Index.get_duplicates()`` is deprecated and will be removed in a future version (:issue:`20239`) - The previous default behavior of negative indices in ``Categorical.take`` is deprecated. In a future version it will change from meaning missing values to meaning positional indices from the right. The future behavior is consistent with :meth:`Series.take` (:issue:`20664`). diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 7aeff9bec75b5..248c648c33db3 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # cython: profile=False import collections +import textwrap +import warnings import sys cdef bint PY3 = (sys.version_info[0] >= 3) @@ -1188,6 +1190,15 @@ class Timedelta(_Timedelta): if other.dtype.kind == 'm': # also timedelta-like return _broadcast_floordiv_td64(self.value, other, _rfloordiv) + elif other.dtype.kind == 'i': + # Backwards compatibility + # GH-19761 + msg = textwrap.dedent("""\ + Floor division between integer array and Timedelta is + deprecated. Use 'array // timedelta.value' instead. + """) + warnings.warn(msg, FutureWarning) + return other // self.value raise TypeError('Invalid dtype {dtype} for ' '{op}'.format(dtype=other.dtype, op='__floordiv__')) @@ -1210,6 +1221,11 @@ class Timedelta(_Timedelta): def __rmod__(self, other): # Naive implementation, room for optimization + if hasattr(other, 'dtype') and other.dtype.kind == 'i': + # TODO: Remove this check with backwards-compat shim + # for integer / Timedelta is removed. 
+ raise TypeError("Invalid type {dtype} for " + "{op}".format(dtype=other.dtype, op='__mod__')) return self.__rdivmod__(other)[1] def __divmod__(self, other): @@ -1219,6 +1235,11 @@ class Timedelta(_Timedelta): def __rdivmod__(self, other): # Naive implementation, room for optimization + if hasattr(other, 'dtype') and other.dtype.kind == 'i': + # TODO: Remove this check with backwards-compat shim + # for integer / Timedelta is removed. + raise TypeError("Invalid type {dtype} for " + "{op}".format(dtype=other.dtype, op='__mod__')) div = other // self return div, other - div * self diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py index 179768fcc6709..9636c92ec22d5 100644 --- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -403,10 +403,11 @@ def test_td_rfloordiv_numeric_scalar(self): with pytest.raises(TypeError): td.__rfloordiv__(np.float64(2.0)) - with pytest.raises(TypeError): - td.__rfloordiv__(np.int32(2.0)) with pytest.raises(TypeError): td.__rfloordiv__(np.uint8(9)) + with tm.assert_produces_warning(FutureWarning): + # GH-19761: Change to TypeError. + td.__rfloordiv__(np.int32(2.0)) def test_td_rfloordiv_timedeltalike_array(self): # GH#18846 @@ -432,7 +433,8 @@ def test_td_rfloordiv_numeric_series(self): ser = pd.Series([1], dtype=np.int64) res = td.__rfloordiv__(ser) assert res is NotImplemented - with pytest.raises(TypeError): + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + # TODO: GH-19761. Change to TypeError. 
ser // td def test_mod_timedeltalike(self): diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index ab2bf92a26826..3fdc2aa71bfc0 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -21,6 +21,18 @@ def test_arithmetic_overflow(self): with pytest.raises(OverflowError): pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999) + def test_array_timedelta_floordiv(self): + # https://github.com/pandas-dev/pandas/issues/19761 + ints = pd.date_range('2012-10-08', periods=4, freq='D').view('i8') + msg = r"Use 'array // timedelta.value'" + with tm.assert_produces_warning(FutureWarning) as m: + result = ints // pd.Timedelta(1, unit='s') + + assert msg in str(m[0].message) + expected = np.array([1349654400, 1349740800, 1349827200, 1349913600], + dtype='i8') + tm.assert_numpy_array_equal(result, expected) + def test_ops_error_str(self): # GH 13624 td = Timedelta('1 day')
Closes #19761. ```python In [2]: pd.DatetimeIndex(['1931', '1970', '2017']).view('i8') // pd.Timedelta(1, unit='s') pandas-dev/bin/ipython:1: FutureWarning: Floor division between integer array and Timedelta is deprecated. Use 'array // timedelta.value' instead. Out[2]: array([-1230768000, 0, 1483228800]) ``` Long-term, we'll recommend using `to_epoch` for the case where people are doing this to do conversion to unix epoch. But https://github.com/pandas-dev/pandas/issues/14772 has a few design issues that will take some time to discuss. I think we should just recommend `.value` for now.
https://api.github.com/repos/pandas-dev/pandas/pulls/21036
2018-05-14T18:47:31Z
2018-05-15T10:01:33Z
2018-05-15T10:01:33Z
2019-12-05T15:53:26Z
DOC: Rephrased doc for Series.asof. Added examples
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6ca8f6731bbb8..a0886eb431882 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6495,40 +6495,98 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, def asof(self, where, subset=None): """ - The last row without any NaN is taken (or the last row without - NaN considering only the subset of columns in the case of a DataFrame) + Return the last row(s) without any `NaN`s before `where`. + + The last row (for each element in `where`, if list) without any + `NaN` is taken. + In case of a :class:`~pandas.DataFrame`, the last row without `NaN` + considering only the subset of columns (if not `None`) .. versionadded:: 0.19.0 For DataFrame - If there is no good value, NaN is returned for a Series + If there is no good value, `NaN` is returned for a Series or a Series of NaN values for a DataFrame Parameters ---------- - where : date or array of dates - subset : string or list of strings, default None - if not None use these columns for NaN propagation + where : date or array-like of dates + Date(s) before which the last row(s) are returned. + subset : str or array-like of str, default `None` + For DataFrame, if not `None`, only use these columns to + check for `NaN`s. Notes ----- - Dates are assumed to be sorted - Raises if this is not the case + Dates are assumed to be sorted. Raises if this is not the case. Returns ------- - where is scalar - - - value or NaN if input is Series - - Series if input is DataFrame + scalar, Series, or DataFrame - where is Index: same shape object as input + * scalar : when `self` is a Series and `where` is a scalar + * Series: when `self` is a Series and `where` is an array-like, + or when `self` is a DataFrame and `where` is a scalar + * DataFrame : when `self` is a DataFrame and `where` is an + array-like See Also -------- - merge_asof + merge_asof : Perform an asof merge. Similar to left join. 
- """ + Examples + -------- + A Series and a scalar `where`. + + >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) + >>> s + 10 1.0 + 20 2.0 + 30 NaN + 40 4.0 + dtype: float64 + + >>> s.asof(20) + 2.0 + For a sequence `where`, a Series is returned. The first value is + ``NaN``, because the first element of `where` is before the first + index value. + + >>> s.asof([5, 20]) + 5 NaN + 20 2.0 + dtype: float64 + + Missing values are not considered. The following is ``2.0``, not + ``NaN``, even though ``NaN`` is at the index location for ``30``. + + >>> s.asof(30) + 2.0 + + Take all columns into consideration + + >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], + ... 'b': [None, None, None, None, 500]}, + ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', + ... '2018-02-27 09:02:00', + ... '2018-02-27 09:03:00', + ... '2018-02-27 09:04:00', + ... '2018-02-27 09:05:00'])) + >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', + ... '2018-02-27 09:04:30'])) + a b + 2018-02-27 09:03:30 NaN NaN + 2018-02-27 09:04:30 NaN NaN + + Take a single column into consideration + + >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', + ... '2018-02-27 09:04:30']), + ... subset=['a']) + a b + 2018-02-27 09:03:30 30.0 NaN + 2018-02-27 09:04:30 40.0 NaN + """ if isinstance(where, compat.string_types): from pandas import to_datetime where = to_datetime(where)
Used code from https://github.com/pandas-dev/pandas/issues/20652 as example, to illustrate different behaviours. - [x] closes #20652 - [x] tests added / passed (N/A, docs) - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21034
2018-05-14T18:26:18Z
2018-11-04T15:01:16Z
2018-11-04T15:01:16Z
2018-11-12T13:22:21Z
TST: add arithmetic operators fixture
diff --git a/pandas/conftest.py b/pandas/conftest.py index 137afaa3b3490..b09cb872a12fb 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -2,6 +2,7 @@ import numpy as np import pandas as pd +from pandas.compat import PY3 import pandas.util._test_decorators as td @@ -77,6 +78,24 @@ def observed(request): return request.param +_all_arithmetic_operators = ['__add__', '__radd__', + '__sub__', '__rsub__', + '__mul__', '__rmul__', + '__floordiv__', '__rfloordiv__', + '__truediv__', '__rtruediv__', + '__pow__', '__rpow__'] +if not PY3: + _all_arithmetic_operators.extend(['__div__', '__rdiv__']) + + +@pytest.fixture(params=_all_arithmetic_operators) +def all_arithmetic_operators(request): + """ + Fixture for dunder names for common arithmetic operations + """ + return request.param + + @pytest.fixture(params=[None, 'gzip', 'bz2', 'zip', pytest.param('xz', marks=td.skip_if_no_lzma)]) def compression(request): diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index f90fcce973f00..ecb74622edf10 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -827,16 +827,52 @@ def test_sub_datetime64_not_ns(self, box, assert_func): res = dt64 - obj assert_func(res, -expected) - def test_operators_datetimelike(self): - def run_ops(ops, get_ser, test_ser): + def test_operators_datetimelike_invalid(self, all_arithmetic_operators): + # these are all TypeEror ops + op_str = all_arithmetic_operators + + def check(get_ser, test_ser): # check that we are getting a TypeError # with 'operate' (from core/ops.py) for the ops that are not # defined - for op_str in ops: - op = getattr(get_ser, op_str, None) - with tm.assert_raises_regex(TypeError, 'operate|cannot'): - op(test_ser) + op = getattr(get_ser, op_str, None) + with tm.assert_raises_regex(TypeError, 'operate|cannot'): + op(test_ser) + + # ## timedelta64 ### + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # ## 
datetime64 ### + dt1 = Series([Timestamp('20111230'), Timestamp('20120101'), + Timestamp('20120103')]) + dt1.iloc[2] = np.nan + dt2 = Series([Timestamp('20111231'), Timestamp('20120102'), + Timestamp('20120104')]) + if op_str not in ['__sub__', '__rsub__']: + check(dt1, dt2) + + # ## datetime64 with timetimedelta ### + # TODO(jreback) __rsub__ should raise? + if op_str not in ['__add__', '__radd__', '__sub__']: + check(dt1, td1) + + # 8260, 10763 + # datetime64 with tz + tz = 'US/Eastern' + dt1 = Series(date_range('2000-01-01 09:00:00', periods=5, + tz=tz), name='foo') + dt2 = dt1.copy() + dt2.iloc[2] = np.nan + td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H')) + td2 = td1.copy() + td2.iloc[1] = np.nan + + if op_str not in ['__add__', '__radd__', '__sub__', '__rsub__']: + check(dt2, td2) + + def test_operators_datetimelike(self): # ## timedelta64 ### td1 = Series([timedelta(minutes=5, seconds=3)] * 3) @@ -848,18 +884,10 @@ def run_ops(ops, get_ser, test_ser): dt1.iloc[2] = np.nan dt2 = Series([Timestamp('20111231'), Timestamp('20120102'), Timestamp('20120104')]) - ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__', - '__pow__', '__radd__', '__rmul__', '__rfloordiv__', - '__rtruediv__', '__rdiv__', '__rpow__'] - run_ops(ops, dt1, dt2) dt1 - dt2 dt2 - dt1 # ## datetime64 with timetimedelta ### - ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', - '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__', - '__rpow__'] - run_ops(ops, dt1, td1) dt1 + td1 td1 + dt1 dt1 - td1 @@ -867,28 +895,20 @@ def run_ops(ops, get_ser, test_ser): # td1 - dt1 # ## timetimedelta with datetime64 ### - ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__', - '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__', - '__rdiv__', '__rpow__'] - run_ops(ops, td1, dt1) td1 + dt1 dt1 + td1 - # 8260, 10763 - # datetime64 with tz - ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', - '__rmul__', 
'__rfloordiv__', '__rtruediv__', '__rdiv__', - '__rpow__'] + def test_operators_datetimelike_with_timezones(self): tz = 'US/Eastern' dt1 = Series(date_range('2000-01-01 09:00:00', periods=5, tz=tz), name='foo') dt2 = dt1.copy() dt2.iloc[2] = np.nan + td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H')) td2 = td1.copy() td2.iloc[1] = np.nan - run_ops(ops, dt1, td1) result = dt1 + td1[0] exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) @@ -1133,25 +1153,23 @@ def test_dt64_series_arith_overflow(self): res = dt - ser tm.assert_series_equal(res, -expected) + @pytest.mark.parametrize('op', ['__add__', '__radd__', + '__sub__', '__rsub__']) @pytest.mark.parametrize('tz', [None, 'Asia/Tokyo']) - def test_dt64_series_add_intlike(self, tz): + def test_dt64_series_add_intlike(self, tz, op): # GH#19123 dti = pd.DatetimeIndex(['2016-01-02', '2016-02-03', 'NaT'], tz=tz) ser = Series(dti) other = Series([20, 30, 40], dtype='uint8') - pytest.raises(TypeError, ser.__add__, 1) - pytest.raises(TypeError, ser.__sub__, 1) + pytest.raises(TypeError, getattr(ser, op), 1) - pytest.raises(TypeError, ser.__add__, other) - pytest.raises(TypeError, ser.__sub__, other) + pytest.raises(TypeError, getattr(ser, op), other) - pytest.raises(TypeError, ser.__add__, other.values) - pytest.raises(TypeError, ser.__sub__, other.values) + pytest.raises(TypeError, getattr(ser, op), other.values) - pytest.raises(TypeError, ser.__add__, pd.Index(other)) - pytest.raises(TypeError, ser.__sub__, pd.Index(other)) + pytest.raises(TypeError, getattr(ser, op), pd.Index(other)) class TestSeriesOperators(TestData):
https://api.github.com/repos/pandas-dev/pandas/pulls/21033
2018-05-14T12:21:28Z
2018-05-14T23:49:23Z
2018-05-14T23:49:23Z
2018-05-14T23:49:23Z
BUG: Fixed 19497 - previously, renaming an index changed its type if …
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1660c8d9fcdc5..f441e6976bd89 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1364,6 +1364,7 @@ Reshaping - Improved error message for :func:`DataFrame.merge` when there is no common merge key (:issue:`19427`) - Bug in :func:`DataFrame.join` which does an ``outer`` instead of a ``left`` join when being called with multiple DataFrames and some have non-unique indices (:issue:`19624`) - :func:`Series.rename` now accepts ``axis`` as a kwarg (:issue:`18589`) +- Bug in :func:`~DataFrame.rename` where an Index of same-length tuples was converted to a MultiIndex (:issue:`19497`) - Comparisons between :class:`Series` and :class:`Index` would return a ``Series`` with an incorrect name, ignoring the ``Index``'s name attribute (:issue:`19582`) - Bug in :func:`qcut` where datetime and timedelta data with ``NaT`` present raised a ``ValueError`` (:issue:`19768`) - Bug in :func:`DataFrame.iterrows`, which would infers strings not compliant to `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_ to datetimes (:issue:`19671`) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index e7b2576ca1eae..fe508dc1bb0bc 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -5296,7 +5296,7 @@ def _transform_index(index, func, level=None): return MultiIndex.from_tuples(items, names=index.names) else: items = [func(x) for x in index] - return Index(items, name=index.name) + return Index(items, name=index.name, tupleize_cols=False) def _putmask_smart(v, m, n): diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 95b952892c93d..164d6746edec0 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -579,6 +579,17 @@ def test_rename_bug(self): columns=['2001-01-01']) assert_frame_equal(df, expected) + def test_rename_bug2(self): + # GH 19497 + # rename was 
changing Index to MultiIndex if Index contained tuples + + df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], + columns=["a"]) + df = df.rename({(1, 1): (5, 4)}, axis="index") + expected = DataFrame(data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], + columns=["a"]) + assert_frame_equal(df, expected) + def test_reorder_levels(self): index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]], labels=[[0, 0, 0, 0, 0, 0],
Fixes bug GH19497 - previously, renaming an index with tuples changed its type (i.e. from Index to MultiIndex). - [x] closes #19497 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/21029
2018-05-14T06:57:19Z
2018-05-14T23:52:16Z
2018-05-14T23:52:15Z
2018-05-15T03:53:24Z
BUG: Iteration over DatetimeIndex stops at chunksize (GH21012)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9761974d77d4b..83950f1d71633 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1365,7 +1365,8 @@ def __iter__(self): converted = libts.ints_to_pydatetime(data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp") - return iter(converted) + for v in converted: + yield v def _wrap_union_result(self, other, result): name = self.name if self.name == other.name else None diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 0722b9175c0c6..1a5f12103595c 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -153,6 +153,17 @@ def test_iteration_preserves_tz(self): assert result._repr_base == expected._repr_base assert result == expected + @pytest.mark.parametrize('periods', [0, 9999, 10000, 10001]) + def test_iteration_over_chunksize(self, periods): + # GH21012 + + index = date_range('2000-01-01 00:00:00', periods=periods, freq='min') + num = 0 + for stamp in index: + assert index[num] == stamp + num += 1 + assert num == len(index) + def test_misc_coverage(self): rng = date_range('1/1/2000', periods=5) result = rng.groupby(rng.day)
- [x] closes #21012 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry avoided using `yield` and changing DatetimeIndex itself into a iterator. @cbertinato 's advice was helpful.
https://api.github.com/repos/pandas-dev/pandas/pulls/21027
2018-05-14T01:02:31Z
2018-05-15T10:02:35Z
2018-05-15T10:02:34Z
2018-06-25T20:59:21Z
DOC: updated the Series.str.rsplit and Series.str.split docstrings
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 08239ae4dae20..b27cfdfe3f1bd 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1343,108 +1343,7 @@ def str_pad(arr, width, side='left', fillchar=' '): def str_split(arr, pat=None, n=None): - """ - Split strings around given separator/delimiter. - - Split each string in the caller's values by given - pattern, propagating NaN values. Equivalent to :meth:`str.split`. - - Parameters - ---------- - pat : str, optional - String or regular expression to split on. - If not specified, split on whitespace. - n : int, default -1 (all) - Limit number of splits in output. - ``None``, 0 and -1 will be interpreted as return all splits. - expand : bool, default False - Expand the split strings into separate columns. - - * If ``True``, return DataFrame/MultiIndex expanding dimensionality. - * If ``False``, return Series/Index, containing lists of strings. - Returns - ------- - Series, Index, DataFrame or MultiIndex - Type matches caller unless ``expand=True`` (see Notes). - - Notes - ----- - The handling of the `n` keyword depends on the number of found splits: - - - If found splits > `n`, make first `n` splits only - - If found splits <= `n`, make all splits - - If for a certain row the number of found splits < `n`, - append `None` for padding up to `n` if ``expand=True`` - - If using ``expand=True``, Series and Index callers return DataFrame and - MultiIndex objects, respectively. - - See Also - -------- - str.split : Standard library version of this method. - Series.str.get_dummies : Split each string into dummy variables. - Series.str.partition : Split string on a separator, returning - the before, separator, and after components. 
- - Examples - -------- - >>> s = pd.Series(["this is good text", "but this is even better"]) - - By default, split will return an object of the same size - having lists containing the split elements - - >>> s.str.split() - 0 [this, is, good, text] - 1 [but, this, is, even, better] - dtype: object - >>> s.str.split("random") - 0 [this is good text] - 1 [but this is even better] - dtype: object - - When using ``expand=True``, the split elements will expand out into - separate columns. - - For Series object, output return type is DataFrame. - - >>> s.str.split(expand=True) - 0 1 2 3 4 - 0 this is good text None - 1 but this is even better - >>> s.str.split(" is ", expand=True) - 0 1 - 0 this good text - 1 but this even better - - For Index object, output return type is MultiIndex. - - >>> i = pd.Index(["ba 100 001", "ba 101 002", "ba 102 003"]) - >>> i.str.split(expand=True) - MultiIndex(levels=[['ba'], ['100', '101', '102'], ['001', '002', '003']], - labels=[[0, 0, 0], [0, 1, 2], [0, 1, 2]]) - - Parameter `n` can be used to limit the number of splits in the output. - - >>> s.str.split("is", n=1) - 0 [th, is good text] - 1 [but th, is even better] - dtype: object - >>> s.str.split("is", n=1, expand=True) - 0 1 - 0 th is good text - 1 but th is even better - - If NaN is present, it is propagated throughout the columns - during the split. - - >>> s = pd.Series(["this is good text", "but this is even better", np.nan]) - >>> s.str.split(n=3, expand=True) - 0 1 2 3 - 0 this is good text - 1 but this is even better - 2 NaN NaN NaN NaN - """ if pat is None: if n is None or n == 0: n = -1 @@ -1464,25 +1363,7 @@ def str_split(arr, pat=None, n=None): def str_rsplit(arr, pat=None, n=None): - """ - Split each string in the Series/Index by the given delimiter - string, starting at the end of the string and working to the front. - Equivalent to :meth:`str.rsplit`. - Parameters - ---------- - pat : string, default None - Separator to split on. 
If None, splits on whitespace - n : int, default -1 (all) - None, 0 and -1 will be interpreted as return all splits - expand : bool, default False - * If True, return DataFrame/MultiIndex expanding dimensionality. - * If False, return Series/Index. - - Returns - ------- - split : Series/Index or DataFrame/MultiIndex of objects - """ if n is None or n == 0: n = -1 f = lambda x: x.rsplit(pat, n) @@ -2325,12 +2206,133 @@ def cat(self, others=None, sep=None, na_rep=None, join=None): res = Series(res, index=data.index, name=self._orig.name) return res - @copy(str_split) + _shared_docs['str_split'] = (""" + Split strings around given separator/delimiter. + + Splits the string in the Series/Index from the %(side)s, + at the specified delimiter string. Equivalent to :meth:`str.%(method)s`. + + Parameters + ---------- + pat : str, optional + String or regular expression to split on. + If not specified, split on whitespace. + n : int, default -1 (all) + Limit number of splits in output. + ``None``, 0 and -1 will be interpreted as return all splits. + expand : bool, default False + Expand the splitted strings into separate columns. + + * If ``True``, return DataFrame/MultiIndex expanding dimensionality. + * If ``False``, return Series/Index, containing lists of strings. + + Returns + ------- + Series, Index, DataFrame or MultiIndex + Type matches caller unless ``expand=True`` (see Notes). + + See Also + -------- + Series.str.split : Split strings around given separator/delimiter. + Series.str.rsplit : Splits string around given separator/delimiter, + starting from the right. + Series.str.join : Join lists contained as elements in the Series/Index + with passed delimiter. + str.split : Standard library version for split. + str.rsplit : Standard library version for rsplit. 
+ + Notes + ----- + The handling of the `n` keyword depends on the number of found splits: + + - If found splits > `n`, make first `n` splits only + - If found splits <= `n`, make all splits + - If for a certain row the number of found splits < `n`, + append `None` for padding up to `n` if ``expand=True`` + + If using ``expand=True``, Series and Index callers return DataFrame and + MultiIndex objects, respectively. + + Examples + -------- + >>> s = pd.Series(["this is a regular sentence", + "https://docs.python.org/3/tutorial/index.html", np.nan]) + + In the default setting, the string is split by whitespace. + + >>> s.str.split() + 0 [this, is, a, regular, sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + Without the `n` parameter, the outputs of `rsplit` and `split` + are identical. + + >>> s.str.rsplit() + 0 [this, is, a, regular, sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + The `n` parameter can be used to limit the number of splits on the + delimiter. The outputs of `split` and `rsplit` are different. + + >>> s.str.split(n=2) + 0 [this, is, a regular sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + >>> s.str.rsplit(n=2) + 0 [this is a, regular, sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + The `pat` parameter can be used to split by other characters. + + >>> s.str.split(pat = "/") + 0 [this is a regular sentence] + 1 [https:, , docs.python.org, 3, tutorial, index... + 2 NaN + dtype: object + + When using ``expand=True``, the split elements will expand out into + separate columns. If NaN is present, it is propagated throughout + the columns during the split. 
+ + >>> s.str.split(expand=True) + 0 1 2 3 + 0 this is a regular + 1 https://docs.python.org/3/tutorial/index.html None None None + 2 NaN NaN NaN NaN \ + + 4 + 0 sentence + 1 None + 2 NaN + + For slightly more complex use cases like splitting the html document name + from a url, a combination of parameter settings can be used. + + >>> s.str.rsplit("/", n=1, expand=True) + 0 1 + 0 this is a regular sentence None + 1 https://docs.python.org/3/tutorial index.html + 2 NaN NaN + """) + + @Appender(_shared_docs['str_split'] % { + 'side': 'beginning', + 'method': 'split'}) def split(self, pat=None, n=-1, expand=False): result = str_split(self._data, pat, n=n) return self._wrap_result(result, expand=expand) - @copy(str_rsplit) + @Appender(_shared_docs['str_split'] % { + 'side': 'end', + 'method': 'rsplit'}) def rsplit(self, pat=None, n=-1, expand=False): result = str_rsplit(self._data, pat, n=n) return self._wrap_result(result, expand=expand)
- [ ] closes #xxxx - [x] tests passed validate_docstrings.py - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] rendered document looks ok 'python make.py --single pandas.Series.str.rsplit' and 'python make.py --single pandas.Series.str.split'
https://api.github.com/repos/pandas-dev/pandas/pulls/21026
2018-05-14T00:32:12Z
2018-06-22T23:36:45Z
2018-06-22T23:36:45Z
2018-06-22T23:36:49Z
PERF: improved performance of CategoricalIndex.is_monotonic*
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index ae1d7029217a4..5464e7cba22c3 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -173,3 +173,23 @@ def setup(self, dtype): def time_isin_categorical(self, dtype): self.series.isin(self.sample) + + +class IsMonotonic(object): + + def setup(self): + N = 1000 + self.c = pd.CategoricalIndex(list('a' * N + 'b' * N + 'c' * N)) + self.s = pd.Series(self.c) + + def time_categorical_index_is_monotonic_increasing(self): + self.c.is_monotonic_increasing + + def time_categorical_index_is_monotonic_decreasing(self): + self.c.is_monotonic_decreasing + + def time_categorical_series_is_monotonic_increasing(self): + self.s.is_monotonic_increasing + + def time_categorical_series_is_monotonic_decreasing(self): + self.s.is_monotonic_decreasing diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 5c9c3e2931bd9..8c5111e712a34 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -29,6 +29,7 @@ Deprecations Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ +- Improved performance of :meth:`CategoricalIndex.is_monotonic_increasing`, :meth:`CategoricalIndex.is_monotonic_decreasing` and :meth:`CategoricalIndex.is_monotonic` (:issue:`21025`) - - diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 3ffef5804acf7..78b7ae7054248 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -382,11 +382,11 @@ def is_unique(self): @property def is_monotonic_increasing(self): - return Index(self.codes).is_monotonic_increasing + return self._engine.is_monotonic_increasing @property def is_monotonic_decreasing(self): - return Index(self.codes).is_monotonic_decreasing + return self._engine.is_monotonic_decreasing @Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs) def unique(self, level=None): diff --git 
a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 6a1a1a5bdba4f..0e630f69b1a32 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -543,35 +543,41 @@ def test_reindex_empty_index(self): tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp)) - def test_is_monotonic(self): - c = CategoricalIndex([1, 2, 3]) + @pytest.mark.parametrize('data, non_lexsorted_data', [ + [[1, 2, 3], [9, 0, 1, 2, 3]], + [list('abc'), list('fabcd')], + ]) + def test_is_monotonic(self, data, non_lexsorted_data): + c = CategoricalIndex(data) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing - c = CategoricalIndex([1, 2, 3], ordered=True) + c = CategoricalIndex(data, ordered=True) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing - c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1]) + c = CategoricalIndex(data, categories=reversed(data)) assert not c.is_monotonic_increasing assert c.is_monotonic_decreasing - c = CategoricalIndex([1, 3, 2], categories=[3, 2, 1]) + c = CategoricalIndex(data, categories=reversed(data), ordered=True) assert not c.is_monotonic_increasing - assert not c.is_monotonic_decreasing + assert c.is_monotonic_decreasing - c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1], ordered=True) + # test when data is neither monotonic increasing nor decreasing + reordered_data = [data[0], data[2], data[1]] + c = CategoricalIndex(reordered_data, categories=reversed(data)) assert not c.is_monotonic_increasing - assert c.is_monotonic_decreasing + assert not c.is_monotonic_decreasing # non lexsorted categories - categories = [9, 0, 1, 2, 3] + categories = non_lexsorted_data - c = CategoricalIndex([9, 0], categories=categories) + c = CategoricalIndex(categories[:2], categories=categories) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing - c = CategoricalIndex([0, 1], categories=categories) + c = CategoricalIndex(categories[1:3], 
categories=categories) assert c.is_monotonic_increasing assert not c.is_monotonic_decreasing
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry ```python >>> n = 1000000 >>> ci = pd.CategoricalIndex(list('a' * n + 'b' * n + 'c' * n)) >>> %t ci.is_monotonic_increasing 22 ms # v0.22 and master 227 ns # this commit ``` There seem to be a few more like this, where ``CategoricalIndex`` should use ``self._engine`` but doesn't. @TomAugspurger?
https://api.github.com/repos/pandas-dev/pandas/pulls/21025
2018-05-14T00:16:10Z
2018-05-17T00:21:51Z
2018-05-17T00:21:51Z
2018-06-08T17:08:16Z
PERF: __contains__ method for Categorical
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 5464e7cba22c3..41460eaf47699 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -193,3 +193,53 @@ def time_categorical_series_is_monotonic_increasing(self): def time_categorical_series_is_monotonic_decreasing(self): self.s.is_monotonic_decreasing + + +class Contains(object): + + params = ([ + "b", # in array + "d", # in categories but not in codes + "z", # nowhere + np.nan, + ], + [True, False], + ) + param_names = ["value", "has_nan"] + + def setup(self, value, has_nan): + n = 1 * 10 ** 4 + obj_values = list("a" * n + "b" * n + "c" * n) + if has_nan: + obj_values = [np.nan] + obj_values[:-2] + [np.nan] + + self.ci = pd.CategoricalIndex(obj_values, categories=list("abcd")) + self.cat = pd.Categorical(obj_values, categories=list("abcd")) + + def time_contains_index(self, value, has_nan): + value in self.ci + + def time_cat_isin(self, value, has_nan): + value in self.cat + + +class Indexing(object): + + params = (["a", "c"], [True, False]) + param_names = ["value", "has_nan"] + + def setup(self, value, has_nan): + n = 1 * 10 ** 4 + obj_values = list("a" * n + "b" * n + "c" * n) + if has_nan: + obj_values = [np.nan] + obj_values[:-2] + [np.nan] + + ci = pd.CategoricalIndex(obj_values, categories=list("abcd")) + self.df = pd.DataFrame(dict(A=range(n * 3)), index=ci) + self.ser = pd.Series(range(n * 3), index=ci) + + def time_loc_df(self, value, has_nan): + self.df.loc[value] + + def time_loc_ser(self, value, has_nan): + self.ser.loc[value] diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index c69de149a0f35..4158edf683c89 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -64,7 +64,7 @@ Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Improved performance of :func:`Series.describe` in case of numeric dtpyes (:issue:`21274`) -- +- Improved performance of indexing on 
a Series/DataFrame with a ``CategoricalIndex`` (:issue:`21022`) .. _whatsnew_0240.docs: @@ -83,7 +83,7 @@ Bug Fixes Categorical ^^^^^^^^^^^ -- +- Fixed an issue where membership checks on ``CategoricalIndex`` with interval values may return false positive (:issue:`21022`) - - diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 30f9c56d24f02..3791f854ea7c2 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1847,6 +1847,19 @@ def __iter__(self): """Returns an Iterator over the values of this Categorical.""" return iter(self.get_values().tolist()) + def __contains__(self, key): + """Returns True if `key` is in this Categorical.""" + hash(key) + if isna(key): + return self.isna().any() + elif self.categories._defer_to_indexing: # e.g. Interval values + loc = self.categories.get_loc(key) + return np.isin(self.codes, loc).any() + elif key in self.categories: + return self.categories.get_loc(key) in self._codes + else: + return False + def _tidy_repr(self, max_vals=10, footer=True): """ a short repr displaying only max_vals and an optional (but default footer) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 150eca32e229d..8722170ac41d4 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -323,20 +323,10 @@ def _reverse_indexer(self): @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) def __contains__(self, key): - hash(key) - - if self.categories._defer_to_indexing: - return key in self.categories - return key in self.values @Appender(_index_shared_docs['contains'] % _index_doc_kwargs) def contains(self, key): - hash(key) - - if self.categories._defer_to_indexing: - return self.categories.contains(key) - return key in self.values def __array__(self, dtype=None): diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index a2a4170256088..88d76210da8ba 100644 --- 
a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -244,6 +244,17 @@ def test_contains(self): list('aabbca') + [np.nan], categories=list('cabdef')) assert np.nan in ci + ci = CategoricalIndex( + list('aaa'), categories=list('cabdef')) + assert 'f' not in ci + + def test_containst_defer_to_indexing(self): + intervals = pd.interval_range(1, 4) + cat = pd.CategoricalIndex(list(intervals[:-1]), categories=intervals) + assert intervals[0] in cat + assert intervals[1] in cat + assert intervals[2] not in cat + def test_min_max(self): ci = self.create_index(ordered=False)
Calling `key in Categorical(...)` trivially falls back to calling `__iter__` and then forces a full construction of the array by invoking `get_values`. This implementation is faster in best case situations than using .e.g.`any(Categorical.isin(key))` since we do not care about the complete array. Not sure if I need to handle any edge cases, though. This obviously can't reach the performance of a simple Index but is a bit faster than before before: ``` · Discovering benchmarks · Running 1 total benchmarks (1 commits * 1 environments * 1 benchmarks) [ 0.00%] ·· Building for existing-py_Users_fjetter_miniconda2_envs_pandas-dev_bin_python [ 0.00%] ·· Benchmarking existing-py_Users_fjetter_miniconda2_envs_pandas-dev_bin_python [100.00%] ··· Running categoricals.Slicing.time_loc_categorical 2.04ms ``` after ``` · Discovering benchmarks · Running 1 total benchmarks (1 commits * 1 environments * 1 benchmarks) [ 0.00%] ·· Building for existing-py_Users_fjetter_miniconda2_envs_pandas-dev_bin_python [ 0.00%] ·· Benchmarking existing-py_Users_fjetter_miniconda2_envs_pandas-dev_bin_python [100.00%] ··· Running categoricals.Slicing.time_loc_categorical 852.58μs ``` - [x] closes #20395 - [x] benchmark added - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry cc @topper-123
https://api.github.com/repos/pandas-dev/pandas/pulls/21022
2018-05-13T17:53:38Z
2018-06-19T01:46:31Z
null
2023-05-11T01:17:50Z
DOC: some cleanup of various whatsnew files
diff --git a/doc/source/whatsnew/v0.13.0.txt b/doc/source/whatsnew/v0.13.0.txt index f440be1ddd56e..02ddc362255ec 100644 --- a/doc/source/whatsnew/v0.13.0.txt +++ b/doc/source/whatsnew/v0.13.0.txt @@ -651,7 +651,7 @@ Enhancements Additionally, the ``method`` argument to ``interpolate`` has been expanded to include ``'nearest', 'zero', 'slinear', 'quadratic', 'cubic', - 'barycentric', 'krogh', 'piecewise_polynomial', 'pchip', `polynomial`, 'spline'`` + 'barycentric', 'krogh', 'piecewise_polynomial', 'pchip', 'polynomial', 'spline'`` The new methods require scipy_. Consult the Scipy reference guide_ and documentation_ for more information about when the various methods are appropriate. See :ref:`the docs<missing_data.interpolate>`. diff --git a/doc/source/whatsnew/v0.14.0.txt b/doc/source/whatsnew/v0.14.0.txt index be962ceb181ff..92c699017fc13 100644 --- a/doc/source/whatsnew/v0.14.0.txt +++ b/doc/source/whatsnew/v0.14.0.txt @@ -998,7 +998,7 @@ Bug Fixes - Bug in C parser with ``delim_whitespace=True`` and ``\r``-delimited lines - Bug in python parser with explicit multi-index in row following column header (:issue:`6893`) - Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`) -- Bug in ``DataFrame.apply`` with functions that used \*args`` or \*\*kwargs and returned +- Bug in ``DataFrame.apply`` with functions that used ``*args`` or ``**kwargs`` and returned an empty result (:issue:`6952`) - Bug in sum/mean on 32-bit platforms on overflows (:issue:`6915`) - Moved ``Panel.shift`` to ``NDFrame.slice_shift`` and fixed to respect multiple dtypes. (:issue:`6959`) diff --git a/doc/source/whatsnew/v0.14.1.txt b/doc/source/whatsnew/v0.14.1.txt index 4674cbc846722..32a2391c75531 100644 --- a/doc/source/whatsnew/v0.14.1.txt +++ b/doc/source/whatsnew/v0.14.1.txt @@ -229,7 +229,7 @@ Bug Fixes :issue:`7409`). - Bug where bool objects were converted to ``nan`` in ``convert_objects`` (:issue:`7416`). 
-- Bug in ``quantile`` ignoring the axis keyword argument (:issue`7306`) +- Bug in ``quantile`` ignoring the axis keyword argument (:issue:`7306`) - Bug where ``nanops._maybe_null_out`` doesn't work with complex numbers (:issue:`7353`) - Bug in several ``nanops`` functions when ``axis==0`` for diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt index c5ef6c8c9d74a..0f1a8c324de54 100644 --- a/doc/source/whatsnew/v0.15.0.txt +++ b/doc/source/whatsnew/v0.15.0.txt @@ -983,7 +983,7 @@ Other: df.describe(include='all') - Without those arguments, 'describe` will behave as before, including only numerical columns or, if none are, only categorical columns. See also the :ref:`docs <basics.describe>` + Without those arguments, ``describe`` will behave as before, including only numerical columns or, if none are, only categorical columns. See also the :ref:`docs <basics.describe>` - Added ``split`` as an option to the ``orient`` argument in ``pd.DataFrame.to_dict``. (:issue:`7840`) diff --git a/doc/source/whatsnew/v0.15.1.txt b/doc/source/whatsnew/v0.15.1.txt index f84f25d3e906c..345fc9f1b5da7 100644 --- a/doc/source/whatsnew/v0.15.1.txt +++ b/doc/source/whatsnew/v0.15.1.txt @@ -287,7 +287,7 @@ Bug Fixes - Bug in ``Categorical`` reflected comparison operator raising if the first argument was a numpy array scalar (e.g. 
np.int64) (:issue:`8658`) - Bug in Panel indexing with a list-like (:issue:`8710`) - Compat issue is ``DataFrame.dtypes`` when ``options.mode.use_inf_as_null`` is True (:issue:`8722`) -- Bug in ``read_csv``, ``dialect`` parameter would not take a string (:issue: `8703`) +- Bug in ``read_csv``, ``dialect`` parameter would not take a string (:issue:`8703`) - Bug in slicing a multi-index level with an empty-list (:issue:`8737`) - Bug in numeric index operations of add/sub with Float/Index Index with numpy arrays (:issue:`8608`) - Bug in setitem with empty indexer and unwanted coercion of dtypes (:issue:`8669`) diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt index 91ec0c3038985..29f6832b48aaf 100644 --- a/doc/source/whatsnew/v0.16.2.txt +++ b/doc/source/whatsnew/v0.16.2.txt @@ -163,5 +163,5 @@ Bug Fixes - Bug in ``Panel.apply`` when the result has ndim=0 (:issue:`10332`) - Bug in ``read_hdf`` where ``auto_close`` could not be passed (:issue:`9327`). - Bug in ``read_hdf`` where open stores could not be used (:issue:`10330`). -- Bug in adding empty ``DataFrame``s, now results in a ``DataFrame`` that ``.equals`` an empty ``DataFrame`` (:issue:`10181`). +- Bug in adding empty ``DataFrames``, now results in a ``DataFrame`` that ``.equals`` an empty ``DataFrame`` (:issue:`10181`). - Bug in ``to_hdf`` and ``HDFStore`` which did not check that complib choices were valid (:issue:`4582`, :issue:`8874`). 
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 239b2ba96404c..ec8f318b72fef 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -1043,7 +1043,7 @@ Bug Fixes ~~~~~~~~~ - Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`) -- Bug in ``.isin`` on older numpies (:issue: `11232`) +- Bug in ``.isin`` on older numpies (:issue:`11232`) - Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) - Bug in ``DataFrame.to_latex()`` the ``column_format`` argument could not be passed (:issue:`9402`) - Bug in ``DatetimeIndex`` when localizing with ``NaT`` (:issue:`10477`) @@ -1094,7 +1094,7 @@ Bug Fixes - Bug in ``to_datetime`` and ``to_timedelta`` causing ``Index`` name to be lost (:issue:`10875`) -- Bug in ``len(DataFrame.groupby)`` causing ``IndexError`` when there's a column containing only NaNs (:issue: `11016`) +- Bug in ``len(DataFrame.groupby)`` causing ``IndexError`` when there's a column containing only NaNs (:issue:`11016`) - Bug that caused segfault when resampling an empty Series (:issue:`10228`) - Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. 
(:issue:`10150`) diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt index 302105c1e653c..50d7877a9cd48 100644 --- a/doc/source/whatsnew/v0.19.0.txt +++ b/doc/source/whatsnew/v0.19.0.txt @@ -265,7 +265,7 @@ Individual columns can be parsed as a ``Categorical`` using a dict specification Categorical Concatenation ^^^^^^^^^^^^^^^^^^^^^^^^^ -- A function :func:`union_categoricals` has been added for combining categoricals, see :ref:`Unioning Categoricals<categorical.union>` (:issue:`13361`, :issue:`:13763`, issue:`13846`, :issue:`14173`) +- A function :func:`union_categoricals` has been added for combining categoricals, see :ref:`Unioning Categoricals<categorical.union>` (:issue:`13361`, :issue:`13763`, :issue:`13846`, :issue:`14173`) .. ipython:: python @@ -1525,7 +1525,7 @@ Bug Fixes - Bug in invalid datetime parsing in ``to_datetime`` and ``DatetimeIndex`` may raise ``TypeError`` rather than ``ValueError`` (:issue:`11169`, :issue:`11287`) - Bug in ``Index`` created with tz-aware ``Timestamp`` and mismatched ``tz`` option incorrectly coerces timezone (:issue:`13692`) - Bug in ``DatetimeIndex`` with nanosecond frequency does not include timestamp specified with ``end`` (:issue:`13672`) -- Bug in ```Series``` when setting a slice with a ```np.timedelta64``` (:issue:`14155`) +- Bug in ```Series`` when setting a slice with a ``np.timedelta64`` (:issue:`14155`) - Bug in ``Index`` raises ``OutOfBoundsDatetime`` if ``datetime`` exceeds ``datetime64[ns]`` bounds, rather than coercing to ``object`` dtype (:issue:`13663`) - Bug in ``Index`` may ignore specified ``datetime64`` or ``timedelta64`` passed as ``dtype`` (:issue:`13981`) - Bug in ``RangeIndex`` can be created without no arguments rather than raises ``TypeError`` (:issue:`13793`) diff --git a/doc/source/whatsnew/v0.19.1.txt b/doc/source/whatsnew/v0.19.1.txt index 545b4380d9b75..b8afe18e0f871 100644 --- a/doc/source/whatsnew/v0.19.1.txt +++ b/doc/source/whatsnew/v0.19.1.txt @@ -55,7 +55,7 @@ 
Bug Fixes - Bug in ``pd.concat`` with dataframes heterogeneous in length and tuple ``keys`` (:issue:`14438`) - Bug in ``MultiIndex.set_levels`` where illegal level values were still set after raising an error (:issue:`13754`) - Bug in ``DataFrame.to_json`` where ``lines=True`` and a value contained a ``}`` character (:issue:`14391`) -- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue`14327`) +- Bug in ``df.groupby`` causing an ``AttributeError`` when grouping a single index frame by a column and the index level (:issue:`14327`) - Bug in ``df.groupby`` where ``TypeError`` raised when ``pd.Grouper(key=...)`` is passed in a list (:issue:`14334`) - Bug in ``pd.pivot_table`` may raise ``TypeError`` or ``ValueError`` when ``index`` or ``columns`` is not scalar and ``values`` is not specified (:issue:`14380`) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 5f22b518ab6c4..5fb725a76770e 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1711,7 +1711,7 @@ Reshaping - Bug in ``pd.concat()`` in which concatenating with an empty dataframe with ``join='inner'`` was being improperly handled (:issue:`15328`) - Bug with ``sort=True`` in ``DataFrame.join`` and ``pd.merge`` when joining on indexes (:issue:`15582`) - Bug in ``DataFrame.nsmallest`` and ``DataFrame.nlargest`` where identical values resulted in duplicated rows (:issue:`15297`) -- Bug in :func:`pandas.pivot_table` incorrectly raising ``UnicodeError`` when passing unicode input for ```margins`` keyword (:issue:`13292`) +- Bug in :func:`pandas.pivot_table` incorrectly raising ``UnicodeError`` when passing unicode input for ``margins`` keyword (:issue:`13292`) Numeric ^^^^^^^ diff --git a/doc/source/whatsnew/v0.20.2.txt b/doc/source/whatsnew/v0.20.2.txt index 31125db0f34d4..3de6fbc8afaf8 100644 --- a/doc/source/whatsnew/v0.20.2.txt +++ b/doc/source/whatsnew/v0.20.2.txt @@ 
-44,7 +44,7 @@ Bug Fixes - Silenced a warning on some Windows environments about "tput: terminal attributes: No such device or address" when detecting the terminal size. This fix only applies to python 3 (:issue:`16496`) - Bug in using ``pathlib.Path`` or ``py.path.local`` objects with io functions (:issue:`16291`) -- Bug in ``Index.symmetric_difference()`` on two equal MultiIndex's, results in a ``TypeError`` (:issue `13490`) +- Bug in ``Index.symmetric_difference()`` on two equal MultiIndex's, results in a ``TypeError`` (:issue:`13490`) - Bug in ``DataFrame.update()`` with ``overwrite=False`` and ``NaN values`` (:issue:`15593`) - Passing an invalid engine to :func:`read_csv` now raises an informative ``ValueError`` rather than ``UnboundLocalError``. (:issue:`16511`) @@ -83,7 +83,7 @@ Plotting ^^^^^^^^ - Bug in ``DataFrame.plot`` with a single column and a list-like ``color`` (:issue:`3486`) -- Bug in ``plot`` where ``NaT`` in ``DatetimeIndex`` results in ``Timestamp.min`` (:issue: `12405`) +- Bug in ``plot`` where ``NaT`` in ``DatetimeIndex`` results in ``Timestamp.min`` (:issue:`12405`) - Bug in ``DataFrame.boxplot`` where ``figsize`` keyword was not respected for non-grouped boxplots (:issue:`11959`) diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 0c2e494f29bc1..3a257c1ff9648 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -895,7 +895,7 @@ nicely format the x-axis for ``DatetimeIndex`` or ``PeriodIndex`` values. You must explicitly register these methods: Pandas built-in ``Series.plot`` and ``DataFrame.plot`` *will* register these -converters on first-use (:issue:17710). +converters on first-use (:issue:`17710`). .. 
note:: @@ -1047,7 +1047,7 @@ Conversion - Bug in :attr:`Timestamp.weekday_name` returning a UTC-based weekday name when localized to a timezone (:issue:`17354`) - Bug in ``Timestamp.replace`` when replacing ``tzinfo`` around DST changes (:issue:`15683`) - Bug in ``Timedelta`` construction and arithmetic that would not propagate the ``Overflow`` exception (:issue:`17367`) -- Bug in :meth:`~DataFrame.astype` converting to object dtype when passed extension type classes (`DatetimeTZDtype``, ``CategoricalDtype``) rather than instances. Now a ``TypeError`` is raised when a class is passed (:issue:`17780`). +- Bug in :meth:`~DataFrame.astype` converting to object dtype when passed extension type classes (``DatetimeTZDtype``, ``CategoricalDtype``) rather than instances. Now a ``TypeError`` is raised when a class is passed (:issue:`17780`). - Bug in :meth:`to_numeric` in which elements were not always being coerced to numeric when ``errors='coerce'`` (:issue:`17007`, :issue:`17125`) - Bug in ``DataFrame`` and ``Series`` constructors where ``range`` objects are converted to ``int32`` dtype on Windows instead of ``int64`` (:issue:`16804`) diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 67c52dac6128d..2e9e616daf3a7 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -81,7 +81,7 @@ Deprecations ~~~~~~~~~~~~ - ``pandas.tseries.register`` has been renamed to - :func:`pandas.plotting.register_matplotlib_converters`` (:issue:`18301`) + :func:`pandas.plotting.register_matplotlib_converters` (:issue:`18301`) .. 
_whatsnew_0211.performance: @@ -101,7 +101,7 @@ Conversion - Bug in :class:`TimedeltaIndex` subtraction could incorrectly overflow when ``NaT`` is present (:issue:`17791`) - Bug in :class:`DatetimeIndex` subtracting datetimelike from DatetimeIndex could fail to overflow (:issue:`18020`) - Bug in :meth:`IntervalIndex.copy` when copying and ``IntervalIndex`` with non-default ``closed`` (:issue:`18339`) -- Bug in :func:`DataFrame.to_dict` where columns of datetime that are tz-aware were not converted to required arrays when used with ``orient='records'``, raising``TypeError` (:issue:`18372`) +- Bug in :func:`DataFrame.to_dict` where columns of datetime that are tz-aware were not converted to required arrays when used with ``orient='records'``, raising ``TypeError`` (:issue:`18372`) - Bug in :class:`DateTimeIndex` and :meth:`date_range` where mismatching tz-aware ``start`` and ``end`` timezones would not raise an err if ``end.tzinfo`` is None (:issue:`18431`) - Bug in :meth:`Series.fillna` which raised when passed a long integer on Python 2 (:issue:`18159`). diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1660c8d9fcdc5..0e9a7ca777d69 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -569,10 +569,10 @@ Previous Behavior (and current behavior if on Python < 3.6): .. 
code-block:: ipython - In [1]: pd.Series({'Income': 2000, - 'Expenses': -1500, - 'Taxes': -200, - 'Net result': 300}) + pd.Series({'Income': 2000, + 'Expenses': -1500, + 'Taxes': -200, + 'Net result': 300}) Expenses -1500 Income 2000 Net result 300 diff --git a/doc/source/whatsnew/v0.8.0.txt b/doc/source/whatsnew/v0.8.0.txt index b2d1d16e86990..b5ec5aa73ee9a 100644 --- a/doc/source/whatsnew/v0.8.0.txt +++ b/doc/source/whatsnew/v0.8.0.txt @@ -123,7 +123,7 @@ Other new features - Enhanced :ref:`read_csv/read_table <io.parse_dates>` for reading time series data and converting multiple columns to dates - Add :ref:`comments <io.comments>` option to parser functions: read_csv, etc. -- Add :ref`dayfirst <io.dayfirst>` option to parser functions for parsing +- Add :ref:`dayfirst <io.dayfirst>` option to parser functions for parsing international DD/MM/YYYY dates - Allow the user to specify the CSV reader :ref:`dialect <io.dialect>` to control quoting etc.
Over time, various whatsnew text files have had some errors that caused parsing errors for sphinx/rst. This PR cleans up various such bugs that have been causing wrong parsing output.
https://api.github.com/repos/pandas-dev/pandas/pulls/21021
2018-05-13T15:27:02Z
2018-05-15T00:02:04Z
2018-05-15T00:02:04Z
2018-05-18T18:05:14Z
BUG: CategoricalIndex.searchsorted doesn't return a scalar if input was scalar
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 9c29c34adb7dd..db39a1c558b97 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -88,7 +88,8 @@ Indexing - Bug in :meth:`MultiIndex.set_names` where error raised for a ``MultiIndex`` with ``nlevels == 1`` (:issue:`21149`) - Bug in :class:`IntervalIndex` constructors where creating an ``IntervalIndex`` from categorical data was not fully supported (:issue:`21243`, issue:`21253`) - Bug in :meth:`MultiIndex.sort_index` which was not guaranteed to sort correctly with ``level=1``; this was also causing data misalignment in particular :meth:`DataFrame.stack` operations (:issue:`20994`, :issue:`20945`, :issue:`21052`) -- +- Bug in :func:`CategoricalIndex.searchsorted` where the method did not return a scalar when the input values was scalar (:issue:`21019`) +- Bug in :class:`CategoricalIndex` where slicing beyond the range of the data raised a ``KeyError`` (:issue:`21019`) I/O ^^^ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 30f9c56d24f02..8f670d21d9c44 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1342,6 +1342,8 @@ def searchsorted(self, value, side='left', sorter=None): if -1 in values_as_codes: raise ValueError("Value(s) to be inserted must be in categories.") + if is_scalar(value): + values_as_codes = values_as_codes.item() return self.codes.searchsorted(values_as_codes, side=side, sorter=sorter) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 150eca32e229d..d0d5f3e9de971 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -432,13 +432,14 @@ def get_loc(self, key, method=None): >>> monotonic_index.get_loc('b') slice(1, 3, None) - >>> non_monotonic_index = p.dCategoricalIndex(list('abcb')) + >>> non_monotonic_index = pd.CategoricalIndex(list('abcb')) >>> non_monotonic_index.get_loc('b') 
array([False, True, False, True], dtype=bool) """ codes = self.categories.get_loc(key) if (codes == -1): raise KeyError(key) + return self._engine.get_loc(codes) def get_value(self, series, key): diff --git a/pandas/tests/categorical/test_analytics.py b/pandas/tests/categorical/test_analytics.py index 53d0e596a1d99..ab8d2f30f545a 100644 --- a/pandas/tests/categorical/test_analytics.py +++ b/pandas/tests/categorical/test_analytics.py @@ -86,9 +86,9 @@ def test_searchsorted(self): # Searching for single item argument, side='left' (default) res_cat = c1.searchsorted('apple') res_ser = s1.searchsorted('apple') - exp = np.array([2], dtype=np.intp) - tm.assert_numpy_array_equal(res_cat, exp) - tm.assert_numpy_array_equal(res_ser, exp) + exp = np.intp(2) + assert res_cat == exp + assert res_ser == exp # Searching for single item array, side='left' (default) res_cat = c1.searchsorted(['bread']) diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 634ad0d8160ed..9f745700049ae 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -627,15 +627,80 @@ def test_reindexing(self): lambda: self.df2.reindex(['a'], limit=2)) def test_loc_slice(self): - # slicing - # not implemented ATM - # GH9748 + df = DataFrame( + {"A": range(0, 6)}, + index=CategoricalIndex(list("aabcde"), name="B"), + ) + + # slice on an unordered categorical using in-sample, connected edges + result = df.loc["b":"d"] + expected = df.iloc[2:5] + assert_frame_equal(result, expected) - pytest.raises(TypeError, lambda: self.df.loc[1:5]) + # Slice the entire dataframe + result = df.loc["a":"e"] + assert_frame_equal(result, df) + result_iloc = df.iloc[0:6] + assert_frame_equal(result_iloc, result) + + # check if the result is identical to an ordinary index + df_non_cat_index = df.copy() + df_non_cat_index.index = df_non_cat_index.index.astype(str) + result = df.loc["a":"e"] + result_non_cat = 
df_non_cat_index.loc["a": "e"] + result.index = result.index.astype(str) + assert_frame_equal(result_non_cat, result) + + @pytest.mark.parametrize( + "content", + [list("aab"), list("bbc"), list('bbc')], + ids=["right_edge", "left_edge", "both_edges"], + ) + def test_loc_beyond_edge_slicing(self, content): + """ + This test ensures that no `KeyError` is raised if trying to slice + beyond the edges of known, ordered categories. + + see GH21019 + """ + # This dataframe might be a slice of a larger categorical + # (i.e. more categories are known than there are in the column) + + ordered_df = DataFrame( + {"A": range(0, 3)}, + index=CategoricalIndex( + content, categories=list("abcde"), name="B", ordered=True + ), + ) + + # Although the edge is not within the slice, this should fall back + # to searchsorted slicing since the category is known and the index + # is ordered. Since we're selecting a value larger/lower than the + # right/left edge we should get the original slice again. + result = ordered_df.loc["a": "d"] + assert_frame_equal(result, ordered_df) + + # Ensure that index based slicing gives the same result + result_iloc = ordered_df.iloc[0:4] + assert_frame_equal(result, result_iloc) + + # If the categorical is not sorted and the requested edge + # is not in the slice we cannot perform slicing + ordered_df.index = ordered_df.index.as_unordered() + with pytest.raises(KeyError): + ordered_df.loc["a": "d"] - # result = df.loc[1:5] - # expected = df.iloc[[1,2,3,4]] - # assert_frame_equal(result, expected) + with pytest.raises(KeyError): + # If the category is not known, there is nothing we can do + ordered_df.loc["a":"z"] + + unordered_df = ordered_df.copy() + unordered_df.index = unordered_df.index.as_unordered() + with pytest.raises(KeyError): + # This operation previously succeeded for an ordered index. 
Since + # this index is no longer ordered, we cannot perfom out of range + # slicing / searchsorted + unordered_df.loc["a": "d"] def test_boolean_selection(self):
`CategoricalIndex.searchsorted` returns the wrong shape for scalar input. Numpy arrays and all other index types return a scalar if the input is a scalar, but the `CategoricalIndex` does not For example ``` >>> import numpy as np >>> np.array([1, 2, 3]).searchsorted(1) 0 >>> np.array([1, 2, 3]).searchsorted([1]) array([0]) >>> import pandas as pd >>> pd.Index([1, 2, 3]).searchsorted(1) 0 >>> pd.Index([1, 2, 3]).searchsorted([1]) array([0]) ``` This issue also affects slicing on sorted/ordered categoricals, which is why I've written another test for the slicing. - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry - [ ] example in categoricals.rst
https://api.github.com/repos/pandas-dev/pandas/pulls/21019
2018-05-13T09:51:10Z
2018-11-18T22:29:55Z
null
2018-11-18T22:29:55Z
BUG: .reset_index() should raise with an invalid level name (GH20925)
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 35d150dc263b8..90780f118016c 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -59,7 +59,7 @@ Conversion Indexing ^^^^^^^^ -- +- Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`) - I/O diff --git a/pandas/core/series.py b/pandas/core/series.py index 0e2ae22f35af7..6d396e845219e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1195,12 +1195,13 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): inplace = validate_bool_kwarg(inplace, 'inplace') if drop: new_index = com._default_index(len(self)) - if level is not None and isinstance(self.index, MultiIndex): + if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] - if len(level) < len(self.index.levels): - new_index = self.index.droplevel(level) + if isinstance(self.index, MultiIndex): + if len(level) < self.index.nlevels: + new_index = self.index.droplevel(level) if inplace: self.index = new_index diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index dce4e82cbdcf1..859082a7e722d 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -188,6 +188,11 @@ def test_reset_index_level(self): with tm.assert_raises_regex(IndexError, 'Too many levels'): s.reset_index(level=[0, 1, 2]) + # Check that .reset_index([],drop=True) doesn't fail + result = pd.Series(range(4)).reset_index([], drop=True) + expected = pd.Series(range(4)) + assert_series_equal(result, expected) + def test_reset_index_range(self): # GH 12071 s = pd.Series(range(2), name='A', dtype='int64') @@ -275,3 +280,18 @@ def test_set_axis_prior_to_deprecation_signature(self): with tm.assert_produces_warning(FutureWarning): result = s.set_axis(0, list('abcd'), 
inplace=False) tm.assert_series_equal(result, expected) + + def test_reset_index_drop_errors(self): + # GH 20925 + + # KeyError raised for series index when passed level name is missing + s = pd.Series(range(4)) + with tm.assert_raises_regex(KeyError, 'must be same as name'): + s.reset_index('wrong', drop=True) + with tm.assert_raises_regex(KeyError, 'must be same as name'): + s.reset_index('wrong') + + # KeyError raised for series when level to be dropped is missing + s = pd.Series(range(4), index=pd.MultiIndex.from_product([[1, 2]] * 2)) + with tm.assert_raises_regex(KeyError, 'not found'): + s.reset_index('wrong', drop=True)
#20925 Raises appropriate error for Series.reset_index(level_name, drop=True) when index is flat and an invalid level is supplied - [x] closes #20925 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21016
2018-05-12T14:21:23Z
2018-05-18T05:53:46Z
2018-05-18T05:53:45Z
2018-06-08T17:09:13Z
BUG: Concatentation of TZ-aware dataframes
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index d10d51352d0e4..72ca371f6c6be 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1372,6 +1372,8 @@ Reshaping - Bug in :class:`Series` constructor with a ``dtype=str``, previously raised in some cases (:issue:`19853`) - Bug in :func:`get_dummies`, and :func:`select_dtypes`, where duplicate column names caused incorrect behavior (:issue:`20848`) - Bug in :func:`isna`, which cannot handle ambiguous typed lists (:issue:`20675`) +- Bug in :func:`concat` which raises an error when concatenating TZ-aware dataframes and all-NaT dataframes (:issue:`12396`) +- Bug in :func:`concat` which raises an error when concatenating empty TZ-aware series (:issue:`18447`) Other ^^^^^ diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 4aa74cdbbc2c0..9f6813bc38464 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -416,6 +416,13 @@ def _maybe_unwrap(x): fastpath=True) +def _concatenate_2d(to_concat, axis): + # coerce to 2d if needed & concatenate + if axis == 1: + to_concat = [np.atleast_2d(x) for x in to_concat] + return np.concatenate(to_concat, axis=axis) + + def _concat_datetime(to_concat, axis=0, typs=None): """ provide concatenation of an datetimelike array of arrays each of which is a @@ -432,61 +439,57 @@ def _concat_datetime(to_concat, axis=0, typs=None): a single array, preserving the combined dtypes """ - def convert_to_pydatetime(x, axis): - # coerce to an object dtype + if typs is None: + typs = get_dtype_kinds(to_concat) - # if dtype is of datetimetz or timezone - if x.dtype.kind == _NS_DTYPE.kind: - if getattr(x, 'tz', None) is not None: - x = x.astype(object).values - else: - shape = x.shape - x = tslib.ints_to_pydatetime(x.view(np.int64).ravel(), - box="timestamp") - x = x.reshape(shape) + # multiple types, need to coerce to object + if len(typs) != 1: + return 
_concatenate_2d([_convert_datetimelike_to_object(x) + for x in to_concat], + axis=axis) - elif x.dtype == _TD_DTYPE: - shape = x.shape - x = tslib.ints_to_pytimedelta(x.view(np.int64).ravel(), box=True) - x = x.reshape(shape) + # must be single dtype + if any(typ.startswith('datetime') for typ in typs): - if axis == 1: - x = np.atleast_2d(x) - return x + if 'datetime' in typs: + to_concat = [np.array(x, copy=False).view(np.int64) + for x in to_concat] + return _concatenate_2d(to_concat, axis=axis).view(_NS_DTYPE) + else: + # when to_concat has different tz, len(typs) > 1. + # thus no need to care + return _concat_datetimetz(to_concat) - if typs is None: - typs = get_dtype_kinds(to_concat) + elif 'timedelta' in typs: + return _concatenate_2d([x.view(np.int64) for x in to_concat], + axis=axis).view(_TD_DTYPE) - # must be single dtype - if len(typs) == 1: - _contains_datetime = any(typ.startswith('datetime') for typ in typs) - _contains_period = any(typ.startswith('period') for typ in typs) + elif any(typ.startswith('period') for typ in typs): + # PeriodIndex must be handled by PeriodIndex, + # Thus can't meet this condition ATM + # Must be changed when we adding PeriodDtype + raise NotImplementedError("unable to concat PeriodDtype") - if _contains_datetime: - if 'datetime' in typs: - new_values = np.concatenate([x.view(np.int64) for x in - to_concat], axis=axis) - return new_values.view(_NS_DTYPE) - else: - # when to_concat has different tz, len(typs) > 1. 
- # thus no need to care - return _concat_datetimetz(to_concat) - - elif 'timedelta' in typs: - new_values = np.concatenate([x.view(np.int64) for x in to_concat], - axis=axis) - return new_values.view(_TD_DTYPE) - - elif _contains_period: - # PeriodIndex must be handled by PeriodIndex, - # Thus can't meet this condition ATM - # Must be changed when we adding PeriodDtype - raise NotImplementedError - - # need to coerce to object - to_concat = [convert_to_pydatetime(x, axis) for x in to_concat] - return np.concatenate(to_concat, axis=axis) +def _convert_datetimelike_to_object(x): + # coerce datetimelike array to object dtype + + # if dtype is of datetimetz or timezone + if x.dtype.kind == _NS_DTYPE.kind: + if getattr(x, 'tz', None) is not None: + x = x.astype(object).values + else: + shape = x.shape + x = tslib.ints_to_pydatetime(x.view(np.int64).ravel(), + box="timestamp") + x = x.reshape(shape) + + elif x.dtype == _TD_DTYPE: + shape = x.shape + x = tslib.ints_to_pytimedelta(x.view(np.int64).ravel(), box=True) + x = x.reshape(shape) + + return x def _concat_datetimetz(to_concat, name=None): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 7837faf5b4c0f..df39eb5fd8312 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2190,10 +2190,10 @@ def _assert_take_fillable(self, values, indices, allow_fill=True, msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') raise ValueError(msg) - taken = values.take(indices) - mask = indices == -1 - if mask.any(): - taken[mask] = na_value + taken = algos.take(values, + indices, + allow_fill=allow_fill, + fill_value=na_value) else: taken = values.take(indices) return taken diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 474894aba65df..e7b2576ca1eae 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -5835,7 +5835,8 @@ def get_reindexed_values(self, empty_dtype, upcasted_na): if len(values) and values[0] 
is None: fill_value = None - if getattr(self.block, 'is_datetimetz', False): + if getattr(self.block, 'is_datetimetz', False) or \ + is_datetimetz(empty_dtype): pass elif getattr(self.block, 'is_categorical', False): pass diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 57af67422d65f..f5e58fa70e1c4 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1917,6 +1917,77 @@ def test_concat_tz_series_tzlocal(self): tm.assert_series_equal(result, pd.Series(x + y)) assert result.dtype == 'datetime64[ns, tzlocal()]' + @pytest.mark.parametrize('tz1', [None, 'UTC']) + @pytest.mark.parametrize('tz2', [None, 'UTC']) + @pytest.mark.parametrize('s', [pd.NaT, pd.Timestamp('20150101')]) + def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, s): + # GH 12396 + + # tz-naive + first = pd.DataFrame([[pd.NaT], [pd.NaT]]).apply( + lambda x: x.dt.tz_localize(tz1)) + second = pd.DataFrame([s]).apply(lambda x: x.dt.tz_localize(tz2)) + + result = pd.concat([first, second], axis=0) + expected = pd.DataFrame(pd.Series( + [pd.NaT, pd.NaT, s], index=[0, 1, 0])) + expected = expected.apply(lambda x: x.dt.tz_localize(tz2)) + if tz1 != tz2: + expected = expected.astype(object) + + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('tz1', [None, 'UTC']) + @pytest.mark.parametrize('tz2', [None, 'UTC']) + def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2): + # GH 12396 + + first = pd.DataFrame(pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)) + second = pd.DataFrame(pd.Series( + [pd.NaT]).dt.tz_localize(tz2), columns=[1]) + expected = pd.DataFrame( + {0: pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1), + 1: pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2)} + ) + result = pd.concat([first, second], axis=1) + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('tz1', [None, 'UTC']) + @pytest.mark.parametrize('tz2', [None, 'UTC']) + def 
test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2): + # GH 12396 + + # tz-naive + first = pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1) + second = pd.DataFrame([[pd.Timestamp('2015/01/01', tz=tz2)], + [pd.Timestamp('2016/01/01', tz=tz2)]], + index=[2, 3]) + + expected = pd.DataFrame([pd.NaT, pd.NaT, + pd.Timestamp('2015/01/01', tz=tz2), + pd.Timestamp('2016/01/01', tz=tz2)]) + if tz1 != tz2: + expected = expected.astype(object) + + result = pd.concat([first, second]) + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('tz', [None, 'UTC']) + def test_concat_NaT_dataframes(self, tz): + # GH 12396 + + first = pd.DataFrame([[pd.NaT], [pd.NaT]]) + first = first.apply(lambda x: x.dt.tz_localize(tz)) + second = pd.DataFrame([[pd.Timestamp('2015/01/01', tz=tz)], + [pd.Timestamp('2016/01/01', tz=tz)]], + index=[2, 3]) + expected = pd.DataFrame([pd.NaT, pd.NaT, + pd.Timestamp('2015/01/01', tz=tz), + pd.Timestamp('2016/01/01', tz=tz)]) + + result = pd.concat([first, second], axis=0) + assert_frame_equal(result, expected) + def test_concat_period_series(self): x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D')) y = Series(pd.PeriodIndex(['2015-10-01', '2016-01-01'], freq='D')) @@ -1978,6 +2049,21 @@ def test_concat_empty_series(self): columns=['x', 0]) tm.assert_frame_equal(res, exp) + @pytest.mark.parametrize('tz', [None, 'UTC']) + @pytest.mark.parametrize('values', [[], [1, 2, 3]]) + def test_concat_empty_series_timelike(self, tz, values): + # GH 18447 + + first = Series([], dtype='M8[ns]').dt.tz_localize(tz) + second = Series(values) + expected = DataFrame( + {0: pd.Series([pd.NaT] * len(values), + dtype='M8[ns]' + ).dt.tz_localize(tz), + 1: values}) + result = concat([first, second], axis=1) + assert_frame_equal(result, expected) + def test_default_index(self): # is_series and ignore_index s1 = pd.Series([1, 2, 3], name='x')
closes #12396 closes #18447 supersedes #19327
https://api.github.com/repos/pandas-dev/pandas/pulls/21014
2018-05-11T21:52:04Z
2018-05-13T13:17:46Z
null
2018-05-13T13:18:21Z
Don't raise warning on merging int and float with nan
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 0204e655bfa2c..4d8897fb7c811 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -955,14 +955,14 @@ def _maybe_coerce_merge_keys(self): # check whether ints and floats elif is_integer_dtype(rk) and is_float_dtype(lk): - if not (lk == lk.astype(rk.dtype)).all(): + if not (lk == lk.astype(rk.dtype))[~np.isnan(lk)].all(): warnings.warn('You are merging on int and float ' 'columns where the float values ' 'are not equal to their int ' 'representation', UserWarning) elif is_float_dtype(rk) and is_integer_dtype(lk): - if not (rk == rk.astype(lk.dtype)).all(): + if not (rk == rk.astype(lk.dtype))[~np.isnan(rk)].all(): warnings.warn('You are merging on int and float ' 'columns where the float values ' 'are not equal to their int ' diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 436fe8f9f5d7e..8e639edd34b18 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1519,6 +1519,13 @@ def test_merge_on_ints_floats_warning(self): result = B.merge(A, left_on='Y', right_on='X') assert_frame_equal(result, expected[['Y', 'X']]) + # test no warning if float has NaNs + B = DataFrame({'Y': [np.nan, np.nan, 3.0]}) + + with tm.assert_produces_warning(None): + result = B.merge(A, left_on='Y', right_on='X') + assert_frame_equal(result, expected[['Y', 'X']]) + @pytest.mark.parametrize('df1_vals, df2_vals', [ ([0, 1, 2], ["0", "1", "2"]), ([0.0, 1.0, 2.0], ["0", "1", "2"]),
Closes https://github.com/pandas-dev/pandas/issues/20998
https://api.github.com/repos/pandas-dev/pandas/pulls/21011
2018-05-11T08:58:09Z
2018-05-11T11:46:29Z
2018-05-11T11:46:29Z
2018-05-24T20:20:22Z
CLN: Revert "Return Python integers" (not needed after #20989)
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 741e5553141f7..205ab9002379d 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -195,7 +195,7 @@ cdef class IndexEngine: if count > 1: return indexer if count == 1: - return int(found[0]) + return found[0] raise KeyError(val) diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in index 4ea35da0626f3..fc66c1c987d6e 100644 --- a/pandas/_libs/index_class_helper.pxi.in +++ b/pandas/_libs/index_class_helper.pxi.in @@ -74,7 +74,7 @@ cdef class {{name}}Engine(IndexEngine): if count > 1: return indexer if count == 1: - return int(found[0]) + return found[0] raise KeyError(val)
This reverts commit 8e3d4d08501dc83d4c2302686c8342e7448c78e5. - [x] tests passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` I had introduced the explicit casting to solve broken tests on Windows, but @jreback's #20989 is the correct way to do it, so these are now redundant.
https://api.github.com/repos/pandas-dev/pandas/pulls/21010
2018-05-11T08:54:04Z
2018-05-11T13:06:08Z
null
2018-05-11T13:06:31Z
ENH: Implement linspace behavior for timedelta_range and interval_range
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3f7bc0e8d8c3f..a8a201558ec9b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -526,7 +526,7 @@ Other Enhancements - Added new writer for exporting Stata dta files in version 117, ``StataWriter117``. This format supports exporting strings with lengths up to 2,000,000 characters (:issue:`16450`) - :func:`to_hdf` and :func:`read_hdf` now accept an ``errors`` keyword argument to control encoding error handling (:issue:`20835`) - :func:`cut` has gained the ``duplicates='raise'|'drop'`` option to control whether to raise on duplicated edges (:issue:`20947`) -- :func:`date_range` now returns a linearly spaced ``DatetimeIndex`` if ``start``, ``stop``, and ``periods`` are specified, but ``freq`` is not. (:issue:`20808`, :issue:`20983`) +- :func:`date_range`, :func:`timedelta_range`, and :func:`interval_range` now return a linearly spaced index if ``start``, ``stop``, and ``periods`` are specified, but ``freq`` is not. (:issue:`20808`, :issue:`20983`, :issue:`20976`) .. 
_whatsnew_0230.api_breaking: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1d5c2d9a098ed..9761974d77d4b 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -358,11 +358,6 @@ def __new__(cls, data=None, msg = 'periods must be a number, got {periods}' raise TypeError(msg.format(periods=periods)) - if data is None and freq is None \ - and com._any_none(periods, start, end): - raise ValueError("Must provide freq argument if no data is " - "supplied") - # if dtype has an embedded tz, capture it if dtype is not None: try: @@ -377,9 +372,13 @@ def __new__(cls, data=None, pass if data is None: - return cls._generate(start, end, periods, name, freq, - tz=tz, normalize=normalize, closed=closed, - ambiguous=ambiguous) + if freq is None and com._any_none(periods, start, end): + msg = 'Must provide freq argument if no data is supplied' + raise ValueError(msg) + else: + return cls._generate(start, end, periods, name, freq, tz=tz, + normalize=normalize, closed=closed, + ambiguous=ambiguous) if not isinstance(data, (np.ndarray, Index, ABCSeries)): if is_scalar(data): @@ -2590,11 +2589,6 @@ def date_range(start=None, end=None, periods=None, freq=None, tz=None, """ Return a fixed frequency DatetimeIndex. - Of the three parameters `start`, `end`, `periods`, and `freq` exactly - three must be specified. If `freq` is omitted, the resulting DatetimeIndex - will have `periods` linearly spaced elements between `start` and `end` - (closed on both sides). - Parameters ---------- start : str or datetime-like, optional @@ -2628,9 +2622,20 @@ def date_range(start=None, end=None, periods=None, freq=None, tz=None, See Also -------- pandas.DatetimeIndex : An immutable container for datetimes. + pandas.timedelta_range : Return a fixed frequency TimedeltaIndex. pandas.period_range : Return a fixed frequency PeriodIndex. pandas.interval_range : Return a fixed frequency IntervalIndex. 
+ Notes + ----- + Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, + exactly three must be specified. If ``freq`` is omitted, the resulting + ``DatetimeIndex`` will have ``periods`` linearly spaced elements between + ``start`` and ``end`` (closed on both sides). + + To learn more about the frequency strings, please see `this link + <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. + Examples -------- **Specifying the values** @@ -2769,8 +2774,10 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None, Notes ----- - Of the three parameters: ``start``, ``end``, and ``periods``, exactly two - must be specified. + Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``, + exactly three must be specified. Specifying ``freq`` is a requirement + for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not + desired. To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. 
@@ -2779,6 +2786,9 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None, ------- rng : DatetimeIndex """ + if freq is None: + msg = 'freq must be specified for bdate_range; use date_range instead' + raise TypeError(msg) if is_string_like(freq) and freq.startswith('C'): try: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 766ac7b14120e..408a8cc435b63 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -6,7 +6,8 @@ from pandas.core.dtypes.missing import notna, isna from pandas.core.dtypes.generic import ABCDatetimeIndex, ABCPeriodIndex from pandas.core.dtypes.dtypes import IntervalDtype -from pandas.core.dtypes.cast import maybe_convert_platform, find_common_type +from pandas.core.dtypes.cast import ( + maybe_convert_platform, find_common_type, maybe_downcast_to_dtype) from pandas.core.dtypes.common import ( _ensure_platform_int, is_list_like, @@ -1465,8 +1466,13 @@ def interval_range(start=None, end=None, periods=None, freq=None, Notes ----- - Of the three parameters: ``start``, ``end``, and ``periods``, exactly two - must be specified. + Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, + exactly three must be specified. If ``freq`` is omitted, the resulting + ``IntervalIndex`` will have ``periods`` linearly spaced elements between + ``start`` and ``end``, inclusively. + + To learn more about datetime-like frequency strings, please see `this link + <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Returns ------- @@ -1505,6 +1511,14 @@ def interval_range(start=None, end=None, periods=None, freq=None, (2017-03-01, 2017-04-01]] closed='right', dtype='interval[datetime64[ns]]') + Specify ``start``, ``end``, and ``periods``; the frequency is generated + automatically (linearly spaced). 
+ + >>> pd.interval_range(start=0, end=6, periods=4) + IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]] + closed='right', + dtype='interval[float64]') + The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. @@ -1516,19 +1530,21 @@ def interval_range(start=None, end=None, periods=None, freq=None, -------- IntervalIndex : an Index of intervals that are all closed on the same side. """ - if com._count_not_none(start, end, periods) != 2: - raise ValueError('Of the three parameters: start, end, and periods, ' - 'exactly two must be specified') - start = com._maybe_box_datetimelike(start) end = com._maybe_box_datetimelike(end) - endpoint = next(com._not_none(start, end)) + endpoint = start if start is not None else end + + if freq is None and com._any_none(periods, start, end): + freq = 1 if is_number(endpoint) else 'D' + + if com._count_not_none(start, end, periods, freq) != 3: + raise ValueError('Of the four parameters: start, end, periods, and ' + 'freq, exactly three must be specified') if not _is_valid_endpoint(start): msg = 'start must be numeric or datetime-like, got {start}' raise ValueError(msg.format(start=start)) - - if not _is_valid_endpoint(end): + elif not _is_valid_endpoint(end): msg = 'end must be numeric or datetime-like, got {end}' raise ValueError(msg.format(end=end)) @@ -1538,8 +1554,7 @@ def interval_range(start=None, end=None, periods=None, freq=None, msg = 'periods must be a number, got {periods}' raise TypeError(msg.format(periods=periods)) - freq = freq or (1 if is_number(endpoint) else 'D') - if not is_number(freq): + if freq is not None and not is_number(freq): try: freq = to_offset(freq) except ValueError: @@ -1552,28 +1567,34 @@ def interval_range(start=None, end=None, periods=None, freq=None, _is_type_compatible(end, freq)]): raise TypeError("start, end, freq need to be type compatible") + # +1 to convert interval count to breaks count (n breaks = n-1 intervals) 
+ if periods is not None: + periods += 1 + if is_number(endpoint): + # compute the period/start/end if unspecified (at most one) if periods is None: - periods = int((end - start) // freq) - - if start is None: - start = end - periods * freq - - # force end to be consistent with freq (lower if freq skips over end) - end = start + periods * freq - - # end + freq for inclusive endpoint - breaks = np.arange(start, end + freq, freq) - elif isinstance(endpoint, Timestamp): - # add one to account for interval endpoints (n breaks = n-1 intervals) - if periods is not None: - periods += 1 - breaks = date_range(start=start, end=end, periods=periods, freq=freq) + periods = int((end - start) // freq) + 1 + elif start is None: + start = end - (periods - 1) * freq + elif end is None: + end = start + (periods - 1) * freq + + # force end to be consistent with freq (lower if freq skips end) + if freq is not None: + end -= end % freq + + breaks = np.linspace(start, end, periods) + if all(is_integer(x) for x in com._not_none(start, end, freq)): + # np.linspace always produces float output + breaks = maybe_downcast_to_dtype(breaks, 'int64') else: - # add one to account for interval endpoints (n breaks = n-1 intervals) - if periods is not None: - periods += 1 - breaks = timedelta_range(start=start, end=end, periods=periods, - freq=freq) + # delegate to the appropriate range function + if isinstance(endpoint, Timestamp): + range_func = date_range + else: + range_func = timedelta_range + + breaks = range_func(start=start, end=end, periods=periods, freq=freq) return IntervalIndex.from_breaks(breaks, name=name, closed=closed) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 6b278fc35c831..9707d19953418 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -225,13 +225,13 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, msg = 'periods must be a number, got {periods}' raise 
TypeError(msg.format(periods=periods)) - if data is None and freq is None: - raise ValueError("Must provide freq argument if no data is " - "supplied") - if data is None: - return cls._generate(start, end, periods, name, freq, - closed=closed) + if freq is None and com._any_none(periods, start, end): + msg = 'Must provide freq argument if no data is supplied' + raise ValueError(msg) + else: + return cls._generate(start, end, periods, name, freq, + closed=closed) if unit is not None: data = to_timedelta(data, unit=unit, box=False) @@ -266,10 +266,10 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, return cls._simple_new(data, name=name, freq=freq) @classmethod - def _generate(cls, start, end, periods, name, offset, closed=None): - if com._count_not_none(start, end, periods) != 2: - raise ValueError('Of the three parameters: start, end, and ' - 'periods, exactly two must be specified') + def _generate(cls, start, end, periods, name, freq, closed=None): + if com._count_not_none(start, end, periods, freq) != 3: + raise ValueError('Of the four parameters: start, end, periods, ' + 'and freq, exactly three must be specified') if start is not None: start = Timedelta(start) @@ -295,8 +295,11 @@ def _generate(cls, start, end, periods, name, offset, closed=None): else: raise ValueError("Closed has to be either 'left', 'right' or None") - index = _generate_regular_range(start, end, periods, offset) - index = cls._simple_new(index, name=name, freq=offset) + if freq is not None: + index = _generate_regular_range(start, end, periods, freq) + index = cls._simple_new(index, name=name, freq=freq) + else: + index = to_timedelta(np.linspace(start.value, end.value, periods)) if not left_closed: index = index[1:] @@ -1046,7 +1049,7 @@ def _generate_regular_range(start, end, periods, offset): return data -def timedelta_range(start=None, end=None, periods=None, freq='D', +def timedelta_range(start=None, end=None, periods=None, freq=None, name=None, closed=None): 
""" Return a fixed frequency TimedeltaIndex, with day as the default @@ -1074,8 +1077,10 @@ def timedelta_range(start=None, end=None, periods=None, freq='D', Notes ----- - Of the three parameters: ``start``, ``end``, and ``periods``, exactly two - must be specified. + Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, + exactly three must be specified. If ``freq`` is omitted, the resulting + ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between + ``start`` and ``end`` (closed on both sides). To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. @@ -1102,6 +1107,17 @@ def timedelta_range(start=None, end=None, periods=None, freq='D', TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00', '1 days 18:00:00', '2 days 00:00:00'], dtype='timedelta64[ns]', freq='6H') + + Specify ``start``, ``end``, and ``periods``; the frequency is generated + automatically (linearly spaced). 
+ + >>> pd.timedelta_range(start='1 day', end='5 days', periods=4) + TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00', + '5 days 00:00:00'], + dtype='timedelta64[ns]', freq=None) """ + if freq is None and com._any_none(periods, start, end): + freq = 'D' + return TimedeltaIndex(start=start, end=end, periods=periods, freq=freq, name=name, closed=closed) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 3fb088329f225..193804b66395b 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -361,6 +361,10 @@ def test_constructor(self): with tm.assert_raises_regex(TypeError, msg): bdate_range('2011-1-1', '2012-1-1', 'B') + msg = 'freq must be specified for bdate_range; use date_range instead' + with tm.assert_raises_regex(TypeError, msg): + bdate_range(START, END, periods=10, freq=None) + def test_naive_aware_conflicts(self): naive = bdate_range(START, END, freq=BDay(), tz=None) aware = bdate_range(START, END, freq=BDay(), tz="Asia/Hong_Kong") diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py index 203e8e3128edc..0fadfcf0c7f28 100644 --- a/pandas/tests/indexes/interval/test_interval_range.py +++ b/pandas/tests/indexes/interval/test_interval_range.py @@ -6,9 +6,9 @@ from pandas import ( Interval, IntervalIndex, Timestamp, Timedelta, DateOffset, interval_range, date_range, timedelta_range) +from pandas.core.dtypes.common import is_integer from pandas.tseries.offsets import Day import pandas.util.testing as tm -import pandas as pd @pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither']) @@ -23,200 +23,198 @@ def name(request): class TestIntervalRange(object): - def test_construction_from_numeric(self, closed, name): - # combinations of start/end/periods without freq - expected = IntervalIndex.from_breaks( - np.arange(0, 6), 
name=name, closed=closed) - - result = interval_range(start=0, end=5, name=name, closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=0, periods=5, name=name, closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=5, periods=5, name=name, closed=closed) - tm.assert_index_equal(result, expected) - - # combinations of start/end/periods with freq - expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)], - name=name, closed=closed) - - result = interval_range(start=0, end=6, freq=2, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=0, periods=3, freq=2, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=6, periods=3, freq=2, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - # output truncates early if freq causes end to be skipped. - expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)], - name=name, closed=closed) - result = interval_range(start=0, end=4, freq=1.5, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize('tz', [None, 'US/Eastern']) - def test_construction_from_timestamp(self, closed, name, tz): - # combinations of start/end/periods without freq - start = Timestamp('2017-01-01', tz=tz) - end = Timestamp('2017-01-06', tz=tz) - breaks = date_range(start=start, end=end) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - result = interval_range(start=start, end=end, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=start, periods=5, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=end, periods=5, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - # combinations of start/end/periods with fixed freq - freq = '2D' - start = Timestamp('2017-01-01', 
tz=tz) - end = Timestamp('2017-01-07', tz=tz) - breaks = date_range(start=start, end=end, freq=freq) + @pytest.mark.parametrize('freq, periods', [ + (1, 100), (2.5, 40), (5, 20), (25, 4)]) + def test_constructor_numeric(self, closed, name, freq, periods): + start, end = 0, 100 + breaks = np.arange(101, step=freq) expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) + # defined from start/end/freq + result = interval_range( + start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(start=start, periods=3, freq=freq, name=name, - closed=closed) + # defined from start/periods/freq + result = interval_range( + start=start, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(end=end, periods=3, freq=freq, name=name, - closed=closed) + # defined from end/periods/freq + result = interval_range( + end=end, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - # output truncates early if freq causes end to be skipped. 
- end = Timestamp('2017-01-08', tz=tz) - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) + # GH 20976: linspace behavior defined from start/end/periods + result = interval_range( + start=start, end=end, periods=periods, name=name, closed=closed) tm.assert_index_equal(result, expected) - # combinations of start/end/periods with non-fixed freq - freq = 'M' - start = Timestamp('2017-01-01', tz=tz) - end = Timestamp('2017-12-31', tz=tz) + @pytest.mark.parametrize('tz', [None, 'US/Eastern']) + @pytest.mark.parametrize('freq, periods', [ + ('D', 364), ('2D', 182), ('22D18H', 16), ('M', 11)]) + def test_constructor_timestamp(self, closed, name, freq, periods, tz): + start, end = Timestamp('20180101', tz=tz), Timestamp('20181231', tz=tz) breaks = date_range(start=start, end=end, freq=freq) expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) + # defined from start/end/freq + result = interval_range( + start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(start=start, periods=11, freq=freq, name=name, - closed=closed) + # defined from start/periods/freq + result = interval_range( + start=start, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(end=end, periods=11, freq=freq, name=name, - closed=closed) + # defined from end/periods/freq + result = interval_range( + end=end, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - # output truncates early if freq causes end to be skipped. 
- end = Timestamp('2018-01-15', tz=tz) - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) + # GH 20976: linspace behavior defined from start/end/periods + if not breaks.freq.isAnchored() and tz is None: + # matches expected only for non-anchored offsets and tz naive + # (anchored/DST transitions cause unequal spacing in expected) + result = interval_range(start=start, end=end, periods=periods, + name=name, closed=closed) + tm.assert_index_equal(result, expected) - def test_construction_from_timedelta(self, closed, name): - # combinations of start/end/periods without freq - start, end = Timedelta('1 day'), Timedelta('6 days') - breaks = timedelta_range(start=start, end=end) + @pytest.mark.parametrize('freq, periods', [ + ('D', 100), ('2D12H', 40), ('5D', 20), ('25D', 4)]) + def test_constructor_timedelta(self, closed, name, freq, periods): + start, end = Timedelta('0 days'), Timedelta('100 days') + breaks = timedelta_range(start=start, end=end, freq=freq) expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - result = interval_range(start=start, end=end, name=name, - closed=closed) + # defined from start/end/freq + result = interval_range( + start=start, end=end, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(start=start, periods=5, name=name, - closed=closed) + # defined from start/periods/freq + result = interval_range( + start=start, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(end=end, periods=5, name=name, - closed=closed) + # defined from end/periods/freq + result = interval_range( + end=end, periods=periods, freq=freq, name=name, closed=closed) tm.assert_index_equal(result, expected) - # combinations of start/end/periods with fixed freq - freq = '2D' - start, end = Timedelta('1 day'), Timedelta('7 days') - breaks = 
timedelta_range(start=start, end=end, freq=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=start, periods=3, freq=freq, name=name, - closed=closed) + # GH 20976: linspace behavior defined from start/end/periods + result = interval_range( + start=start, end=end, periods=periods, name=name, closed=closed) tm.assert_index_equal(result, expected) - result = interval_range(end=end, periods=3, freq=freq, name=name, - closed=closed) + @pytest.mark.parametrize('start, end, freq, expected_endpoint', [ + (0, 10, 3, 9), + (Timedelta('0D'), Timedelta('10D'), '2D4H', Timedelta('8D16H')), + (Timestamp('2018-01-01'), + Timestamp('2018-02-09'), + 'MS', + Timestamp('2018-02-01')), + (Timestamp('2018-01-01', tz='US/Eastern'), + Timestamp('2018-01-20', tz='US/Eastern'), + '5D12H', + Timestamp('2018-01-17 12:00:00', tz='US/Eastern'))]) + def test_early_truncation(self, start, end, freq, expected_endpoint): + # index truncates early if freq causes end to be skipped + result = interval_range(start=start, end=end, freq=freq) + result_endpoint = result.right[-1] + assert result_endpoint == expected_endpoint + + @pytest.mark.parametrize('start, mid, end', [ + (Timestamp('2018-03-10', tz='US/Eastern'), + Timestamp('2018-03-10 23:30:00', tz='US/Eastern'), + Timestamp('2018-03-12', tz='US/Eastern')), + (Timestamp('2018-11-03', tz='US/Eastern'), + Timestamp('2018-11-04 00:30:00', tz='US/Eastern'), + Timestamp('2018-11-05', tz='US/Eastern'))]) + def test_linspace_dst_transition(self, start, mid, end): + # GH 20976: linspace behavior defined from start/end/periods + # accounts for the hour gained/lost during DST transition + result = interval_range(start=start, end=end, periods=2) + expected = IntervalIndex.from_breaks([start, mid, end]) tm.assert_index_equal(result, expected) - # output truncates early 
if freq causes end to be skipped. - end = Timedelta('7 days 1 hour') - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) + @pytest.mark.parametrize('freq', [2, 2.0]) + @pytest.mark.parametrize('end', [10, 10.0]) + @pytest.mark.parametrize('start', [0, 0.0]) + def test_float_subtype(self, start, end, freq): + # Has float subtype if any of start/end/freq are float, even if all + # resulting endpoints can safely be upcast to integers + + # defined from start/end/freq + index = interval_range(start=start, end=end, freq=freq) + result = index.dtype.subtype + expected = 'int64' if is_integer(start + end + freq) else 'float64' + assert result == expected + + # defined from start/periods/freq + index = interval_range(start=start, periods=5, freq=freq) + result = index.dtype.subtype + expected = 'int64' if is_integer(start + freq) else 'float64' + assert result == expected + + # defined from end/periods/freq + index = interval_range(end=end, periods=5, freq=freq) + result = index.dtype.subtype + expected = 'int64' if is_integer(end + freq) else 'float64' + assert result == expected + + # GH 20976: linspace behavior defined from start/end/periods + index = interval_range(start=start, end=end, periods=5) + result = index.dtype.subtype + expected = 'int64' if is_integer(start + end) else 'float64' + assert result == expected def test_constructor_coverage(self): # float value for periods - expected = pd.interval_range(start=0, periods=10) - result = pd.interval_range(start=0, periods=10.5) + expected = interval_range(start=0, periods=10) + result = interval_range(start=0, periods=10.5) tm.assert_index_equal(result, expected) # equivalent timestamp-like start/end start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15') - expected = pd.interval_range(start=start, end=end) + expected = interval_range(start=start, end=end) - result = pd.interval_range(start=start.to_pydatetime(), - 
end=end.to_pydatetime()) + result = interval_range(start=start.to_pydatetime(), + end=end.to_pydatetime()) tm.assert_index_equal(result, expected) - result = pd.interval_range(start=start.asm8, end=end.asm8) + result = interval_range(start=start.asm8, end=end.asm8) tm.assert_index_equal(result, expected) # equivalent freq with timestamp equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1), DateOffset(days=1)] for freq in equiv_freq: - result = pd.interval_range(start=start, end=end, freq=freq) + result = interval_range(start=start, end=end, freq=freq) tm.assert_index_equal(result, expected) # equivalent timedelta-like start/end start, end = Timedelta(days=1), Timedelta(days=10) - expected = pd.interval_range(start=start, end=end) + expected = interval_range(start=start, end=end) - result = pd.interval_range(start=start.to_pytimedelta(), - end=end.to_pytimedelta()) + result = interval_range(start=start.to_pytimedelta(), + end=end.to_pytimedelta()) tm.assert_index_equal(result, expected) - result = pd.interval_range(start=start.asm8, end=end.asm8) + result = interval_range(start=start.asm8, end=end.asm8) tm.assert_index_equal(result, expected) # equivalent freq with timedelta equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)] for freq in equiv_freq: - result = pd.interval_range(start=start, end=end, freq=freq) + result = interval_range(start=start, end=end, freq=freq) tm.assert_index_equal(result, expected) def test_errors(self): # not enough params - msg = ('Of the three parameters: start, end, and periods, ' - 'exactly two must be specified') + msg = ('Of the four parameters: start, end, periods, and freq, ' + 'exactly three must be specified') with tm.assert_raises_regex(ValueError, msg): interval_range(start=0) @@ -232,7 +230,7 @@ def test_errors(self): # too many params with tm.assert_raises_regex(ValueError, msg): - interval_range(start=0, end=5, periods=6) + interval_range(start=0, end=5, periods=6, freq=1.5) # mixed units msg = 'start, 
end, freq need to be type compatible' diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py index 784ef845fea10..87dff74cd04d7 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py @@ -1,9 +1,9 @@ +import pytest import numpy as np import pandas as pd import pandas.util.testing as tm from pandas.tseries.offsets import Day, Second from pandas import to_timedelta, timedelta_range -from pandas.util.testing import assert_frame_equal class TestTimedeltas(object): @@ -46,12 +46,20 @@ def test_timedelta_range(self): df.index = pd.timedelta_range(start='0s', periods=10, freq='s') expected = df.loc[pd.Timedelta('0s'):, :] result = df.loc['0s':, :] - assert_frame_equal(expected, result) + tm.assert_frame_equal(expected, result) + + @pytest.mark.parametrize('periods, freq', [ + (3, '2D'), (5, 'D'), (6, '19H12T'), (7, '16H'), (9, '12H')]) + def test_linspace_behavior(self, periods, freq): + # GH 20976 + result = timedelta_range(start='0 days', end='4 days', periods=periods) + expected = timedelta_range(start='0 days', end='4 days', freq=freq) + tm.assert_index_equal(result, expected) def test_errors(self): # not enough params - msg = ('Of the three parameters: start, end, and periods, ' - 'exactly two must be specified') + msg = ('Of the four parameters: start, end, periods, and freq, ' + 'exactly three must be specified') with tm.assert_raises_regex(ValueError, msg): timedelta_range(start='0 days') @@ -66,4 +74,4 @@ def test_errors(self): # too many params with tm.assert_raises_regex(ValueError, msg): - timedelta_range(start='0 days', end='5 days', periods=10) + timedelta_range(start='0 days', end='5 days', periods=10, freq='H')
- [X] closes #20976 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Summary: - added linspace behavior support to `timedelta_range` - added linspace behavior support to `interval_range` - refactored `test_interval_range.py` to use `parametrize`; split up some tests - added a check to `bdate_range` to ensure that `freq` is specified - doesn't really make sense to support linspace behavior since `bdate_range` implies a frequency - same underlying code as `date_range`; could get linspace behavior by overriding `freq` to `None` - updated and cleaned docstrings for all `*_range` functions except `period_range`
https://api.github.com/repos/pandas-dev/pandas/pulls/21009
2018-05-11T05:09:16Z
2018-05-11T11:53:49Z
2018-05-11T11:53:49Z
2018-05-11T14:50:46Z
MyPy cleanup and absolute imports in pandas.core.dtypes.common
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index ce87c0a8b0c5a..30949ca6d1d6b 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -195,13 +195,13 @@ def __setitem__(self, key, value): ) def __len__(self): + # type: () -> int """Length of this array Returns ------- length : int """ - # type: () -> int raise AbstractMethodError(self) def __iter__(self): diff --git a/pandas/core/base.py b/pandas/core/base.py index c331ead8d2fef..6625a3bbe97d7 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -788,6 +788,7 @@ def base(self): @property def _ndarray_values(self): + # type: () -> np.ndarray """The data as an ndarray, possibly losing information. The expectation is that this is cheap to compute, and is primarily @@ -795,7 +796,6 @@ def _ndarray_values(self): - categorical -> codes """ - # type: () -> np.ndarray if is_extension_array_dtype(self): return self.values._ndarray_values return self.values diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index c45838e6040a9..05f82c67ddb8b 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -5,17 +5,19 @@ PY3, PY36) from pandas._libs import algos, lib from pandas._libs.tslibs import conversion -from .dtypes import (CategoricalDtype, CategoricalDtypeType, - DatetimeTZDtype, DatetimeTZDtypeType, - PeriodDtype, PeriodDtypeType, - IntervalDtype, IntervalDtypeType, - ExtensionDtype, PandasExtensionDtype) -from .generic import (ABCCategorical, ABCPeriodIndex, - ABCDatetimeIndex, ABCSeries, - ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, - ABCIndexClass, ABCDateOffset) -from .inference import is_string_like, is_list_like -from .inference import * # noqa +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, + DatetimeTZDtypeType, PeriodDtype, PeriodDtypeType, IntervalDtype, + IntervalDtypeType, ExtensionDtype, PandasExtensionDtype) +from pandas.core.dtypes.generic import ( + 
ABCCategorical, ABCPeriodIndex, ABCDatetimeIndex, ABCSeries, + ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, ABCIndexClass, + ABCDateOffset) +from pandas.core.dtypes.inference import ( # noqa:F401 + is_bool, is_integer, is_hashable, is_iterator, is_float, + is_dict_like, is_scalar, is_string_like, is_list_like, is_number, + is_file_like, is_re, is_re_compilable, is_sequence, is_nested_list_like, + is_named_tuple, is_array_like, is_decimal, is_complex, is_interval) _POSSIBLY_CAST_DTYPES = set([np.dtype(t).name
Starting the conversation towards #14468 There were a couple misplaced annotations below docstrings that were causing mypy to choke when running: ```bash mypy pandas --ignore-missing-imports ``` A starred import in pandas.core.dtypes.common was responsible for a lot of complaints from mypy. These were being intentionally suppressed by flake8 but I figure it makes sense to clean up and convert into absolute imports
https://api.github.com/repos/pandas-dev/pandas/pulls/21008
2018-05-11T02:37:51Z
2018-06-23T16:12:19Z
2018-06-23T16:12:19Z
2018-06-23T16:12:23Z
DOC: cleanup of v0.23.0.txt
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 32f7447e5ef77..a262a85722329 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -519,7 +519,7 @@ Other Enhancements - :meth:`~pandas.core.window.Rolling.quantile` and :meth:`~pandas.core.window.Expanding.quantile` now accept the ``interpolation`` keyword, ``linear`` by default (:issue:`20497`) - zip compression is supported via ``compression=zip`` in :func:`DataFrame.to_pickle`, :func:`Series.to_pickle`, :func:`DataFrame.to_csv`, :func:`Series.to_csv`, :func:`DataFrame.to_json`, :func:`Series.to_json`. (:issue:`17778`) - :class:`pandas.tseries.api.offsets.WeekOfMonth` constructor now supports ``n=0`` (:issue:`20517`). -- :class:`DataFrame` and :class:`Series` now support matrix multiplication (```@```) operator (:issue:`10259`) for Python>=3.5 +- :class:`DataFrame` and :class:`Series` now support matrix multiplication (``@``) operator (:issue:`10259`) for Python>=3.5 - Updated :meth:`DataFrame.to_gbq` and :meth:`pandas.read_gbq` signature and documentation to reflect changes from the Pandas-GBQ library version 0.4.0. Adds intersphinx mapping to Pandas-GBQ library. (:issue:`20564`) @@ -569,9 +569,9 @@ Previous Behavior (and current behavior if on Python < 3.6): .. code-block:: ipython In [1]: pd.Series({'Income': 2000, - ... 'Expenses': -1500, - ... 'Taxes': -200, - ... 'Net result': 300}) + 'Expenses': -1500, + 'Taxes': -200, + 'Net result': 300}) Expenses -1500 Income 2000 Net result 300 @@ -806,7 +806,7 @@ Extraction of matching patterns from strings By default, extracting matching patterns from strings with :func:`str.extract` used to return a ``Series`` if a single group was being extracted (a ``DataFrame`` if more than one group was -extracted``). As of Pandas 0.23.0 :func:`str.extract` always returns a ``DataFrame``, unless +extracted). 
As of Pandas 0.23.0 :func:`str.extract` always returns a ``DataFrame``, unless ``expand`` is set to ``False``. Finallay, ``None`` was an accepted value for the ``expand`` parameter (which was equivalent to ``False``), but now raises a ``ValueError``. (:issue:`11386`) @@ -917,9 +917,9 @@ Datetimelike API Changes - ``pandas.tseries.frequencies.get_freq_group()`` and ``pandas.tseries.frequencies.DAYS`` are removed from the public API (:issue:`18034`) - :func:`Series.truncate` and :func:`DataFrame.truncate` will raise a ``ValueError`` if the index is not sorted instead of an unhelpful ``KeyError`` (:issue:`17935`) - :attr:`Series.first` and :attr:`DataFrame.first` will now raise a ``TypeError`` - rather than ``NotImplementedError`` when index is not a :class:`DatetimeIndex`` (:issue:`20725`). -- :attr:`Series.last` and :attr:`DateFrame.last` will now raise a ``TypeError`` - rather than ``NotImplementedError`` when index is not a :class:`DatetimeIndex`` (:issue:`20725`). + rather than ``NotImplementedError`` when index is not a :class:`DatetimeIndex` (:issue:`20725`). +- :attr:`Series.last` and :attr:`DataFrame.last` will now raise a ``TypeError`` + rather than ``NotImplementedError`` when index is not a :class:`DatetimeIndex` (:issue:`20725`). - Restricted ``DateOffset`` keyword arguments. Previously, ``DateOffset`` subclasses allowed arbitrary keyword arguments which could lead to unexpected behavior. Now, only valid arguments will be accepted. (:issue:`17176`, :issue:`18226`). 
- :func:`pandas.merge` provides a more informative error message when trying to merge on timezone-aware and timezone-naive columns (:issue:`15800`) - For :class:`DatetimeIndex` and :class:`TimedeltaIndex` with ``freq=None``, addition or subtraction of integer-dtyped array or ``Index`` will raise ``NullFrequencyError`` instead of ``TypeError`` (:issue:`19895`) @@ -1364,7 +1364,7 @@ Reshaping - Comparisons between :class:`Series` and :class:`Index` would return a ``Series`` with an incorrect name, ignoring the ``Index``'s name attribute (:issue:`19582`) - Bug in :func:`qcut` where datetime and timedelta data with ``NaT`` present raised a ``ValueError`` (:issue:`19768`) - Bug in :func:`DataFrame.iterrows`, which would infers strings not compliant to `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_ to datetimes (:issue:`19671`) -- Bug in :class:`Series` constructor with ``Categorical`` where a ```ValueError`` is not raised when an index of different length is given (:issue:`19342`) +- Bug in :class:`Series` constructor with ``Categorical`` where a ``ValueError`` is not raised when an index of different length is given (:issue:`19342`) - Bug in :meth:`DataFrame.astype` where column metadata is lost when converting to categorical or a dictionary of dtypes (:issue:`19920`) - Bug in :func:`cut` and :func:`qcut` where timezone information was dropped (:issue:`19872`) - Bug in :class:`Series` constructor with a ``dtype=str``, previously raised in some cases (:issue:`19853`)
Cleanup of some errors in ``v.0.23.0.txt``
https://api.github.com/repos/pandas-dev/pandas/pulls/21007
2018-05-10T21:56:35Z
2018-05-11T06:49:14Z
2018-05-11T06:49:14Z
2018-05-18T18:05:14Z
Whatsnew Typo Correction
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 750227cd59f26..3f7bc0e8d8c3f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -555,8 +555,8 @@ If installed, we now require: .. _whatsnew_0230.api_breaking.dict_insertion_order: -Instantation from dicts preserves dict insertion order for python 3.6+ -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Instantiation from dicts preserves dict insertion order for python 3.6+ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Until Python 3.6, dicts in Python had no formally defined ordering. For Python version 3.6 and later, dicts are ordered by insertion order, see
https://api.github.com/repos/pandas-dev/pandas/pulls/21006
2018-05-10T19:56:55Z
2018-05-10T19:59:45Z
2018-05-10T19:59:45Z
2018-05-14T21:11:03Z
DOC: Added 0.23.1 whatsnew template
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt new file mode 100644 index 0000000000000..5c9c3e2931bd9 --- /dev/null +++ b/doc/source/whatsnew/v0.23.1.txt @@ -0,0 +1,82 @@ +.. _whatsnew_0231: + +v0.23.1 +------- + +This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes +and bug fixes. We recommend that all users upgrade to this version. + +.. contents:: What's new in v0.23.1 + :local: + :backlinks: none + +.. _whatsnew_0231.enhancements: + +New features +~~~~~~~~~~~~ + + +.. _whatsnew_0231.deprecations: + +Deprecations +~~~~~~~~~~~~ + +- +- + +.. _whatsnew_0231.performance: + +Performance Improvements +~~~~~~~~~~~~~~~~~~~~~~~~ + +- +- + +Documentation Changes +~~~~~~~~~~~~~~~~~~~~~ + +- +- + +.. _whatsnew_0231.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +- +- + +Conversion +^^^^^^^^^^ + +- +- + +Indexing +^^^^^^^^ + +- +- + +I/O +^^^ + +- +- + +Plotting +^^^^^^^^ + +- +- + +Reshaping +^^^^^^^^^ + +- +- + +Categorical +^^^^^^^^^^^ + +-
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21001
2018-05-10T15:33:15Z
2018-05-16T03:00:32Z
2018-05-16T03:00:32Z
2018-06-12T14:33:51Z
PERF: removed coercion to int64 for arrays of ints in Categorical.from_codes
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 0ffd5f881d626..ae1d7029217a4 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -51,6 +51,7 @@ def setup(self): self.values_some_nan = list(np.tile(self.categories + [np.nan], N)) self.values_all_nan = [np.nan] * len(self.values) + self.values_all_int8 = np.ones(N, 'int8') def time_regular(self): pd.Categorical(self.values, self.categories) @@ -70,6 +71,9 @@ def time_with_nan(self): def time_all_nan(self): pd.Categorical(self.values_all_nan) + def time_from_codes_all_int8(self): + pd.Categorical.from_codes(self.values_all_int8, self.categories) + class ValueCounts(object): diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index d10d51352d0e4..99b931db99c2c 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1079,6 +1079,7 @@ Performance Improvements - Improved performance of :func:`Series.isin` in the case of categorical dtypes (:issue:`20003`) - Improved performance of ``getattr(Series, attr)`` when the Series has certain index types. This manifiested in slow printing of large Series with a ``DatetimeIndex`` (:issue:`19764`) - Fixed a performance regression for :func:`GroupBy.nth` and :func:`GroupBy.last` with some object columns (:issue:`19283`) +- Improved performance of :func:`pandas.core.arrays.Categorical.from_codes` (:issue:`18501`) .. _whatsnew_0230.docs: diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index f91782459df67..abcb9ae3494b5 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -578,7 +578,7 @@ def from_codes(cls, codes, categories, ordered=False): unordered. """ try: - codes = np.asarray(codes, np.int64) + codes = coerce_indexer_dtype(np.asarray(codes), categories) except (ValueError, TypeError): raise ValueError( "codes need to be convertible to an arrays of integers")
- [x] closes #18501 - [ ] tests added / **passed** - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry ``` In [3]: arr = np.ones(10000000,dtype='int8') # master In [4]: %timeit pd.Categorical.from_codes(arr, ['foo', 'bar']) 44.2 ms ± 545 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) # after patch In [4]: %timeit pd.Categorical.from_codes(arr, ['foo', 'bar']) 9 ms ± 54.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) ``` ``` before after ratio [6d5d7015] [fb1f7b84] 9.24±1ms 9.94±0.6ms 1.08 categoricals.Concat.time_concat 5.52±0.1ms 5.41±0.05ms 0.98 categoricals.Concat.time_union 32.0±0.3ms 32.3±0.3ms 1.01 categoricals.Constructor.time_all_nan 1.32±0.02ms 1.28±0.01ms 0.97 categoricals.Constructor.time_datetimes 1.26±0.01ms 1.29±0.02ms 1.02 categoricals.Constructor.time_datetimes_with_nat 354±3μs 349±7μs 0.99 categoricals.Constructor.time_fastpath 20.0±0.08ms 20.1±0.3ms 1.01 categoricals.Constructor.time_regular 185±1ms 186±0.6ms 1.01 categoricals.Constructor.time_with_nan 10.1ms 10.1ms 0.99 categoricals.Isin.time_isin_categorical('int64') 10.7±0.08ms 10.8±0.07ms 1.00 categoricals.Isin.time_isin_categorical('object') 9.11±0.1ms 9.04±0.2ms 0.99 categoricals.Rank.time_rank_int 9.33±0.1ms 9.37±0.1ms 1.00 categoricals.Rank.time_rank_int_cat 9.13±0.1ms 8.97±0.05ms 0.98 categoricals.Rank.time_rank_int_cat_ordered 141±0.9ms 136±1ms 0.97 categoricals.Rank.time_rank_string 11.2±0.2ms 11.1±0.1ms 0.99 categoricals.Rank.time_rank_string_cat 9.04±0.1ms 9.23±0.1ms 1.02 categoricals.Rank.time_rank_string_cat_ordered 592±5μs 586±3μs 0.99 categoricals.Repr.time_rendering 32.8±2ms 28.4±0.6ms ~0.86 categoricals.SetCategories.time_set_categories 31.8±2ms 29.6±0.1ms 0.93 categoricals.ValueCounts.time_value_counts(False) 30.7±0.1ms 29.3±0.2ms 0.96 categoricals.ValueCounts.time_value_counts(True) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21000
2018-05-10T14:38:06Z
2018-05-15T02:20:05Z
2018-05-15T02:20:05Z
2018-05-15T02:20:06Z
API: Added axis to take
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index e8f74cf58a262..88bc497f9f22d 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1448,7 +1448,7 @@ def func(arr, indexer, out, fill_value=np.nan): return func -def take(arr, indices, allow_fill=False, fill_value=None): +def take(arr, indices, axis=0, allow_fill=False, fill_value=None): """ Take elements from an array. @@ -1461,6 +1461,8 @@ def take(arr, indices, allow_fill=False, fill_value=None): to an ndarray. indices : sequence of integers Indices to be taken. + axis : int, default 0 + The axis over which to select values. allow_fill : bool, default False How to handle negative values in `indices`. @@ -1476,6 +1478,9 @@ def take(arr, indices, allow_fill=False, fill_value=None): This may be ``None``, in which case the default NA value for the type (``self.dtype.na_value``) is used. + For multi-dimensional `arr`, each *element* is filled with + `fill_value`. + Returns ------- ndarray or ExtensionArray @@ -1529,10 +1534,11 @@ def take(arr, indices, allow_fill=False, fill_value=None): if allow_fill: # Pandas style, -1 means NA validate_indices(indices, len(arr)) - result = take_1d(arr, indices, allow_fill=True, fill_value=fill_value) + result = take_1d(arr, indices, axis=axis, allow_fill=True, + fill_value=fill_value) else: # NumPy style - result = arr.take(indices) + result = arr.take(indices, axis=axis) return result diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py index 2b78c91f9dac5..9ab147edb8d1b 100644 --- a/pandas/tests/test_take.py +++ b/pandas/tests/test_take.py @@ -447,6 +447,29 @@ def test_2d_datetime64(self): expected[:, [2, 4]] = datetime(2007, 1, 1) tm.assert_almost_equal(result, expected) + def test_take_axis_0(self): + arr = np.arange(12).reshape(4, 3) + result = algos.take(arr, [0, -1]) + expected = np.array([[0, 1, 2], [9, 10, 11]]) + tm.assert_numpy_array_equal(result, expected) + + # allow_fill=True + result = algos.take(arr, [0, -1], 
allow_fill=True, fill_value=0) + expected = np.array([[0, 1, 2], [0, 0, 0]]) + tm.assert_numpy_array_equal(result, expected) + + def test_take_axis_1(self): + arr = np.arange(12).reshape(4, 3) + result = algos.take(arr, [0, -1], axis=1) + expected = np.array([[0, 2], [3, 5], [6, 8], [9, 11]]) + tm.assert_numpy_array_equal(result, expected) + + # allow_fill=True + result = algos.take(arr, [0, -1], axis=1, allow_fill=True, + fill_value=0) + expected = np.array([[0, 0], [3, 0], [6, 0], [9, 0]]) + tm.assert_numpy_array_equal(result, expected) + class TestExtensionTake(object): # The take method found in pd.api.extensions
- [x] closes #20932 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` One thing that's worrisome here: for N-D arrays, the `fill_value` argument is set for each element. In the case of IPArray, this is fine, since my storage NA-value is (0, 0), so I would pass `fill_value=0`. But in general, when people are using an N-D array to back a 1-D column, that won't necessarily work. But at that point, maybe we recommend people implement take on their own.
https://api.github.com/repos/pandas-dev/pandas/pulls/20999
2018-05-10T14:27:29Z
2018-05-10T18:25:31Z
2018-05-10T18:25:30Z
2018-05-10T18:25:35Z
ENH: Raise useful error when iterating a Window
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 32f7447e5ef77..8d20d7b6b78bd 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -966,6 +966,7 @@ Other API Changes - Constructing a Series from a list of length 1 no longer broadcasts this list when a longer index is specified (:issue:`19714`, :issue:`20391`). - :func:`DataFrame.to_dict` with ``orient='index'`` no longer casts int columns to float for a DataFrame with only int and float columns (:issue:`18580`) - A user-defined-function that is passed to :func:`Series.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, :func:`DataFrame.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, or its expanding cousins, will now *always* be passed a ``Series``, rather than a ``np.array``; ``.apply()`` only has the ``raw`` keyword, see :ref:`here <whatsnew_0230.enhancements.window_raw>`. This is consistent with the signatures of ``.aggregate()`` across pandas (:issue:`20584`) +- Rolling and Expanding types raise ``NotImplementedError`` upon iteration (:issue:`11704`). .. 
_whatsnew_0230.deprecations: diff --git a/pandas/core/window.py b/pandas/core/window.py index 5fd054b1930e6..015e7f7913ed0 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -181,6 +181,10 @@ def __unicode__(self): return "{klass} [{attrs}]".format(klass=self._window_type, attrs=','.join(attrs)) + def __iter__(self): + url = 'https://github.com/pandas-dev/pandas/issues/11704' + raise NotImplementedError('See issue #11704 {url}'.format(url=url)) + def _get_index(self, index=None): """ Return index as ndarrays diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 93f637a561718..d8e90ae0e1b35 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -512,6 +512,14 @@ def test_multi_index_names(self): tm.assert_index_equal(result.columns, df.columns) assert result.index.names == [None, '1', '2'] + @pytest.mark.parametrize('klass', [pd.Series, pd.DataFrame]) + def test_iter_raises(self, klass): + # https://github.com/pandas-dev/pandas/issues/11704 + # Iteration over a Window + obj = klass([1, 2, 3, 4]) + with pytest.raises(NotImplementedError): + iter(obj.rolling(2)) + class TestExpanding(Base): @@ -590,6 +598,14 @@ def test_missing_minp_zero(self): expected = pd.Series([np.nan]) tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('klass', [pd.Series, pd.DataFrame]) + def test_iter_raises(self, klass): + # https://github.com/pandas-dev/pandas/issues/11704 + # Iteration over a Window + obj = klass([1, 2, 3, 4]) + with pytest.raises(NotImplementedError): + iter(obj.expanding(2)) + class TestEWM(Base):
Until Issue #11704 is completed, raise a NotImplementedError to provide a more clear error message when attempting to iterate over a Rolling or Expanding window.
https://api.github.com/repos/pandas-dev/pandas/pulls/20996
2018-05-09T22:12:22Z
2018-05-12T18:38:15Z
2018-05-12T18:38:15Z
2018-05-12T18:38:20Z
DEPR: DataFrame dropna accepting multiple axes
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 32f7447e5ef77..c47c7878843c2 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1005,6 +1005,7 @@ Deprecations - Setting ``PeriodIndex.freq`` (which was not guaranteed to work correctly) is deprecated. Use :meth:`PeriodIndex.asfreq` instead (:issue:`20678`) - ``Index.get_duplicates()`` is deprecated and will be removed in a future version (:issue:`20239`) - The previous default behavior of negative indices in ``Categorical.take`` is deprecated. In a future version it will change from meaning missing values to meaning positional indices from the right. The future behavior is consistent with :meth:`Series.take` (:issue:`20664`). +- Passing multiple axes to the ``axis`` parameter in :func:`DataFrame.dropna` has been deprecated and will be removed in a future version (:issue:`20987`) .. _whatsnew_0230.prior_deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d43fb95a70555..0437c479c9d81 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4168,14 +4168,15 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, Parameters ---------- - axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof + axis : {0 or 'index', 1 or 'columns'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. * 1, or 'columns' : Drop columns which contain missing value. - Pass tuple or list to drop on multiple axes. + .. deprecated:: 0.23.0: Pass tuple or list to drop on multiple + axes. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. 
@@ -4259,6 +4260,11 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, """ inplace = validate_bool_kwarg(inplace, 'inplace') if isinstance(axis, (tuple, list)): + # GH20987 + msg = ("supplying multiple axes to axis is deprecated and " + "will be removed in a future version.") + warnings.warn(msg, FutureWarning, stacklevel=2) + result = self for ax in axis: result = result.dropna(how=how, thresh=thresh, subset=subset, diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 668eae21c664f..f1113fd6debf2 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -174,8 +174,12 @@ def test_dropna_multiple_axes(self): [np.nan, np.nan, np.nan, np.nan], [7, np.nan, 8, 9]]) cp = df.copy() - result = df.dropna(how='all', axis=[0, 1]) - result2 = df.dropna(how='all', axis=(0, 1)) + + # GH20987 + with tm.assert_produces_warning(FutureWarning): + result = df.dropna(how='all', axis=[0, 1]) + with tm.assert_produces_warning(FutureWarning): + result2 = df.dropna(how='all', axis=(0, 1)) expected = df.dropna(how='all').dropna(how='all', axis=1) assert_frame_equal(result, expected) @@ -183,7 +187,8 @@ def test_dropna_multiple_axes(self): assert_frame_equal(df, cp) inp = df.copy() - inp.dropna(how='all', axis=(0, 1), inplace=True) + with tm.assert_produces_warning(FutureWarning): + inp.dropna(how='all', axis=(0, 1), inplace=True) assert_frame_equal(inp, expected) def test_dropna_tz_aware_datetime(self):
Deprecates multiple axes passing to `dropna`. I added the whatsnew to 0.23.0 for now, I can move this to 0.24.0 later if preferred once the whatsnew doc is created. - [x] closes #20987 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20995
2018-05-09T21:41:01Z
2018-05-10T10:32:10Z
2018-05-10T10:32:10Z
2018-05-10T21:02:01Z
DOC: update Readme recommended tags for new contributors
diff --git a/README.md b/README.md index cd2cb99992977..3c8fe57400099 100644 --- a/README.md +++ b/README.md @@ -233,7 +233,7 @@ All contributions, bug reports, bug fixes, documentation improvements, enhanceme A detailed overview on how to contribute can be found in the **[contributing guide.](https://pandas.pydata.org/pandas-docs/stable/contributing.html)** -If you are simply looking to start working with the pandas codebase, navigate to the [GitHub “issues” tab](https://github.com/pandas-dev/pandas/issues) and start looking through interesting issues. There are a number of issues listed under [Docs](https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open) and [Difficulty Novice](https://github.com/pandas-dev/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22) where you could start out. +If you are simply looking to start working with the pandas codebase, navigate to the [GitHub “issues” tab](https://github.com/pandas-dev/pandas/issues) and start looking through interesting issues. There are a number of issues listed under [Docs](https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open) and [good first issue](https://github.com/pandas-dev/pandas/issues?labels=good+first+issue&sort=updated&state=open) where you could start out. You can also triage issues which may include reproducing bug reports, or asking for vital information such as version numbers or reproduction instructions. If you would like to start triaging issues, one easy way to get started is to [subscribe to pandas on CodeTriage](https://www.codetriage.com/pandas-dev/pandas). 
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 58f097c2fc5f3..e9939250052f1 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -17,8 +17,8 @@ If you are brand new to pandas or open-source development, we recommend going through the `GitHub "issues" tab <https://github.com/pandas-dev/pandas/issues>`_ to find issues that interest you. There are a number of issues listed under `Docs <https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_ -and `Difficulty Novice -<https://github.com/pandas-dev/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_ +and `good first issue +<https://github.com/pandas-dev/pandas/issues?labels=good+first+issue&sort=updated&state=open>`_ where you could start out. Once you've found an interesting issue, you can return here to get your development environment setup.
This changes the recommended tag for new contributors from `Difficulty Novice` to `good first issue` in both main README.md and /doc/source/contributing.rst. closes #20982
https://api.github.com/repos/pandas-dev/pandas/pulls/20992
2018-05-09T15:16:31Z
2018-05-09T15:19:59Z
2018-05-09T15:19:59Z
2018-05-09T15:19:59Z
COMPAT: 32-bit indexing compat
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 9968d398e9040..741e5553141f7 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -8,7 +8,8 @@ from cpython.slice cimport PySlice_Check import numpy as np cimport numpy as cnp -from numpy cimport ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t +from numpy cimport (ndarray, float64_t, int32_t, + int64_t, uint8_t, uint64_t, intp_t) cnp.import_array() cdef extern from "numpy/arrayobject.h": @@ -183,8 +184,8 @@ cdef class IndexEngine: cdef _maybe_get_bool_indexer(self, object val): cdef: - ndarray[cnp.uint8_t, ndim=1, cast=True] indexer - ndarray[int64_t, ndim=1] found + ndarray[uint8_t, ndim=1, cast=True] indexer + ndarray[intp_t, ndim=1] found int count indexer = self._get_index_values() == val diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in index 6f726dd49f11e..4ea35da0626f3 100644 --- a/pandas/_libs/index_class_helper.pxi.in +++ b/pandas/_libs/index_class_helper.pxi.in @@ -55,8 +55,8 @@ cdef class {{name}}Engine(IndexEngine): cdef _maybe_get_bool_indexer(self, object val): cdef: - ndarray[cnp.uint8_t, ndim=1, cast=True] indexer - ndarray[int64_t, ndim=1] found + ndarray[uint8_t, ndim=1, cast=True] indexer + ndarray[intp_t, ndim=1] found ndarray[{{ctype}}] values int count = 0
xref #19539
https://api.github.com/repos/pandas-dev/pandas/pulls/20989
2018-05-09T10:52:04Z
2018-05-10T09:57:01Z
2018-05-10T09:57:01Z
2018-05-10T09:57:15Z
BUG: date_range linspace behavior respects tz
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index c6991bc016868..6f5c180c587bd 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -525,7 +525,7 @@ Other Enhancements library. (:issue:`20564`) - Added new writer for exporting Stata dta files in version 117, ``StataWriter117``. This format supports exporting strings with lengths up to 2,000,000 characters (:issue:`16450`) - :func:`to_hdf` and :func:`read_hdf` now accept an ``errors`` keyword argument to control encoding error handling (:issue:`20835`) -- :func:`date_range` now returns a linearly spaced ``DatetimeIndex`` if ``start``, ``stop``, and ``periods`` are specified, but ``freq`` is not. (:issue:`20808`) +- :func:`date_range` now returns a linearly spaced ``DatetimeIndex`` if ``start``, ``stop``, and ``periods`` are specified, but ``freq`` is not. (:issue:`20808`, :issue:`20983`) .. _whatsnew_0230.api_breaking: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1b5aa3b45f3b5..1d5c2d9a098ed 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -587,10 +587,13 @@ def _generate(cls, start, end, periods, name, freq, if end is not None: end = end.tz_localize(tz).asm8 else: + # Create a linearly spaced date_range in local time + start = start.tz_localize(tz) + end = end.tz_localize(tz) index = tools.to_datetime(np.linspace(start.value, - end.value, periods)) - if tz is not None: - index = index.tz_localize('UTC').tz_convert(tz) + end.value, periods), + utc=True) + index = index.tz_convert(tz) if not left_closed and len(index) and index[0] == start: index = index[1:] diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index bbe9cb65eb1a9..3fb088329f225 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -164,20 +164,39 @@ def 
test_date_range_ambiguous_arguments(self): def test_date_range_convenience_periods(self): # GH 20808 - rng = date_range('2018-04-24', '2018-04-27', periods=3) - exp = DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00', - '2018-04-27 00:00:00'], freq=None) + result = date_range('2018-04-24', '2018-04-27', periods=3) + expected = DatetimeIndex(['2018-04-24 00:00:00', + '2018-04-25 12:00:00', + '2018-04-27 00:00:00'], freq=None) - tm.assert_index_equal(rng, exp) + tm.assert_index_equal(result, expected) # Test if spacing remains linear if tz changes to dst in range - rng = date_range('2018-04-01 01:00:00', '2018-04-01 04:00:00', - tz='Australia/Sydney', periods=3) - exp = DatetimeIndex(['2018-04-01 01:00:00+11:00', - '2018-04-01 02:00:00+11:00', - '2018-04-01 02:00:00+10:00', - '2018-04-01 03:00:00+10:00', - '2018-04-01 04:00:00+10:00'], freq=None) + result = date_range('2018-04-01 01:00:00', + '2018-04-01 04:00:00', + tz='Australia/Sydney', + periods=3) + expected = DatetimeIndex([Timestamp('2018-04-01 01:00:00+1100', + tz='Australia/Sydney'), + Timestamp('2018-04-01 02:00:00+1000', + tz='Australia/Sydney'), + Timestamp('2018-04-01 04:00:00+1000', + tz='Australia/Sydney')]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize('start,end,result_tz', [ + ['20180101', '20180103', 'US/Eastern'], + [datetime(2018, 1, 1), datetime(2018, 1, 3), 'US/Eastern'], + [Timestamp('20180101'), Timestamp('20180103'), 'US/Eastern'], + [Timestamp('20180101', tz='US/Eastern'), + Timestamp('20180103', tz='US/Eastern'), 'US/Eastern'], + [Timestamp('20180101', tz='US/Eastern'), + Timestamp('20180103', tz='US/Eastern'), None]]) + def test_date_range_linspacing_tz(self, start, end, result_tz): + # GH 20983 + result = date_range(start, end, periods=3, tz=result_tz) + expected = date_range('20180101', periods=3, freq='D', tz='US/Eastern') + tm.assert_index_equal(result, expected) def test_date_range_businesshour(self): idx = DatetimeIndex(['2014-07-04 09:00', 
'2014-07-04 10:00',
- [x] closes #20983 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry (addendum to the v0.23 whatsnew entry for this feature)
https://api.github.com/repos/pandas-dev/pandas/pulls/20988
2018-05-09T06:47:02Z
2018-05-10T10:26:23Z
2018-05-10T10:26:23Z
2018-05-10T15:22:35Z
Consistent Return Structure for Rolling Apply
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index c6991bc016868..32f7447e5ef77 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1328,6 +1328,7 @@ Groupby/Resample/Rolling - Bug in :func:`DataFrame.groupby` where transformations using ``np.all`` and ``np.any`` were raising a ``ValueError`` (:issue:`20653`) - Bug in :func:`DataFrame.resample` where ``ffill``, ``bfill``, ``pad``, ``backfill``, ``fillna``, ``interpolate``, and ``asfreq`` were ignoring ``loffset``. (:issue:`20744`) - Bug in :func:`DataFrame.groupby` when applying a function that has mixed data types and the user supplied function can fail on the grouping column (:issue:`20949`) +- Bug in :func:`DataFrameGroupBy.rolling().apply() <pandas.core.window.Rolling.apply>` where operations performed against the associated :class:`DataFrameGroupBy` object could impact the inclusion of the grouped item(s) in the result (:issue:`14013`) Sparse ^^^^^^ diff --git a/pandas/core/window.py b/pandas/core/window.py index d7f9f7c85fbbc..5fd054b1930e6 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -837,11 +837,7 @@ def _apply(self, func, name=None, window=None, center=None, index, indexi = self._get_index(index=index) results = [] for b in blocks: - try: - values = self._prep_values(b.values) - except TypeError: - results.append(b.values.copy()) - continue + values = self._prep_values(b.values) if values.size == 0: results.append(values.copy()) diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 304e3d02466a5..93f637a561718 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -105,7 +105,6 @@ def test_attribute_access(self): def tests_skip_nuisance(self): df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'}) - r = df.rolling(window=3) result = r[['A', 'B']].sum() expected = DataFrame({'A': [np.nan, np.nan, 3, 6, 9], @@ -113,9 +112,12 @@ def tests_skip_nuisance(self): 
columns=list('AB')) tm.assert_frame_equal(result, expected) - expected = concat([r[['A', 'B']].sum(), df[['C']]], axis=1) - result = r.sum() - tm.assert_frame_equal(result, expected, check_like=True) + def test_skip_sum_object_raises(self): + df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'}) + r = df.rolling(window=3) + + with tm.assert_raises_regex(TypeError, 'cannot handle this type'): + r.sum() def test_agg(self): df = DataFrame({'A': range(5), 'B': range(0, 10, 2)}) @@ -3174,6 +3176,28 @@ def test_rolling_apply(self, raw): lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw)) tm.assert_frame_equal(result, expected) + def test_rolling_apply_mutability(self): + # GH 14013 + df = pd.DataFrame({'A': ['foo'] * 3 + ['bar'] * 3, 'B': [1] * 6}) + g = df.groupby('A') + + mi = pd.MultiIndex.from_tuples([('bar', 3), ('bar', 4), ('bar', 5), + ('foo', 0), ('foo', 1), ('foo', 2)]) + + mi.names = ['A', None] + # Grouped column should not be a part of the output + expected = pd.DataFrame([np.nan, 2., 2.] * 2, columns=['B'], index=mi) + + result = g.rolling(window=2).sum() + tm.assert_frame_equal(result, expected) + + # Call an arbitrary function on the groupby + g.sum() + + # Make sure nothing has been mutated + result = g.rolling(window=2).sum() + tm.assert_frame_equal(result, expected) + def test_expanding(self): g = self.frame.groupby('A') r = g.expanding()
- [X] closes #14013 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry With the changes made in #20949 it appears possible to fix this bug by suppressing a `catch` block that was internal to Rolling's `_apply`. I'm not sure what the purpose of this catch as it essentially allows values to bypass the applied function...Only one test broke on removal which looked wrong anyway, so I updated it as such
https://api.github.com/repos/pandas-dev/pandas/pulls/20984
2018-05-09T00:07:16Z
2018-05-09T10:24:39Z
2018-05-09T10:24:38Z
2018-09-04T23:49:29Z
Preserve Alignment Between Index and Values for Non-Monotonic Stack
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 0829aa8f5a509..622e3bd553f8c 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -653,7 +653,13 @@ def _convert_level_number(level_num, columns): # time to ravel the values new_data = {} level_vals = this.columns.levels[-1] - level_labels = sorted(set(this.columns.labels[-1])) + level_labels = list() + for label in this.columns.labels[-1]: + # GH 20945 if labels are not monotonic we were mangling + # alignment when moving to index; ensure we preserve order + if label not in level_labels: + level_labels.append(label) + level_vals_used = level_vals[level_labels] levsize = len(level_labels) drop_cols = [] diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index d89731dc09044..26bd72ebbf263 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -414,6 +414,25 @@ def test_stack_mixed_levels(self): assert_frame_equal(df3.stack(level=['animal', 0]), animal_hair_stacked, check_names=False) + def test_stack_retains_index_order_non_monotonic(self): + # GH 20945 + df = pd.DataFrame([ + ['DIM', 'A', 1, 2, 3, 4], + ['DIM', 'B', 11, 22, 33, 44], + ]) + df.columns = ["dim1", "dim2", 'c', 'b', 'a', 'd'] + df.columns.name = 'foo' + df = df.set_index(["dim1", "dim2"]) + + expected_mi = pd.MultiIndex.from_product([['DIM'], ['c', 'b', 'a', 'd']]) + expected_mi.names = ['dim1', 'foo'] + expected = pd.DataFrame([[1, 11], [2, 22], [3, 33], [4, 44]], + index=expected_mi, columns=['A', 'B']) + expected.columns.name = 'dim2' + + result = df.unstack('dim2').stack(level=0) + tm.assert_frame_equal(result, expected) + def test_stack_int_level_names(self): columns = MultiIndex.from_tuples( [('A', 'cat', 'long'), ('B', 'cat', 'long'),
- [X] closes #20945 - [X] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Not overly familiar with this code so submitting for review as there's probably a better way of going about it. The root cause of the referenced issue IIUC is that the index labels of the caller are non-monotonic. `stack` essentially takes values and labels from the level that is getting pushed down into the rows with an implicit assumption that both are monotonic, hence the index/values get misaligned. This breaks at least one other test so not ready to merge, but looking for feedback on: - If there's a better way to align the values with the labels in this function AND/OR - If we make any guarantees about the order of the labels for the level(s) being moved in this function
https://api.github.com/repos/pandas-dev/pandas/pulls/20980
2018-05-08T20:33:07Z
2018-06-02T18:42:29Z
null
2018-12-25T06:13:04Z
Parametrization of indexes/test_base #4
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 377b17d45265c..f4fa547574b9e 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -13,7 +13,7 @@ from pandas.tests.indexes.common import Base from pandas.compat import (range, lrange, lzip, u, - text_type, zip, PY3, PY35, PY36, PYPY) + text_type, zip, PY3, PY35, PY36, PYPY, StringIO) import operator import numpy as np @@ -67,9 +67,9 @@ def generate_index_types(self, skip_index_keys=[]): yield key, index def test_can_hold_identifiers(self): - idx = self.create_index() - key = idx[0] - assert idx._can_hold_identifiers_and_holds_name(key) is True + index = self.create_index() + key = index[0] + assert index._can_hold_identifiers_and_holds_name(key) is True def test_new_axis(self): new_index = self.dateIndex[None, :] @@ -1280,8 +1280,8 @@ def test_get_indexer_strings_raises(self): def test_get_indexer_numeric_index_boolean_target(self): # GH 16877 - numeric_idx = pd.Index(range(4)) - result = numeric_idx.get_indexer([True, False, True]) + numeric_index = pd.Index(range(4)) + result = numeric_index.get_indexer([True, False, True]) expected = np.array([-1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) @@ -1748,16 +1748,18 @@ def test_indexing_doesnt_change_class(self): assert index[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_)) def test_outer_join_sort(self): - left_idx = Index(np.random.permutation(15)) - right_idx = tm.makeDateIndex(10) + left_index = Index(np.random.permutation(15)) + right_index = tm.makeDateIndex(10) with tm.assert_produces_warning(RuntimeWarning): - result = left_idx.join(right_idx, how='outer') + result = left_index.join(right_index, how='outer') - # right_idx in this case because DatetimeIndex has join precedence over - # Int64Index + # right_index in this case because DatetimeIndex has join precedence + # over Int64Index with tm.assert_produces_warning(RuntimeWarning): - expected = 
right_idx.astype(object).union(left_idx.astype(object)) + expected = right_index.astype(object).union( + left_index.astype(object)) + tm.assert_index_equal(result, expected) def test_nan_first_take_datetime(self): @@ -1840,228 +1842,230 @@ def test_reindex_no_type_preserve_target_empty_mi(self): assert result.levels[1].dtype.type == np.float64 def test_groupby(self): - idx = Index(range(5)) - groups = idx.groupby(np.array([1, 1, 2, 2, 2])) - exp = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])} - tm.assert_dict_equal(groups, exp) + index = Index(range(5)) + result = index.groupby(np.array([1, 1, 2, 2, 2])) + expected = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])} - def test_equals_op_multiindex(self): + tm.assert_dict_equal(result, expected) + + @pytest.mark.parametrize("mi,expected", [ + (MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])), + (MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False]))]) + def test_equals_op_multiindex(self, mi, expected): # GH9785 # test comparisons of multiindex - from pandas.compat import StringIO df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1]) - tm.assert_numpy_array_equal(df.index == df.index, - np.array([True, True])) - - mi1 = MultiIndex.from_tuples([(1, 2), (4, 5)]) - tm.assert_numpy_array_equal(df.index == mi1, np.array([True, True])) - mi2 = MultiIndex.from_tuples([(1, 2), (4, 6)]) - tm.assert_numpy_array_equal(df.index == mi2, np.array([True, False])) - mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]) - with tm.assert_raises_regex(ValueError, "Lengths must match"): - df.index == mi3 - index_a = Index(['foo', 'bar', 'baz']) + result = df.index == mi + tm.assert_numpy_array_equal(result, expected) + + def test_equals_op_multiindex_identify(self): + df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1]) + + result = df.index == df.index + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("index", [ + 
MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]), + Index(['foo', 'bar', 'baz'])]) + def test_equals_op_mismatched_multiindex_raises(self, index): + df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1]) + with tm.assert_raises_regex(ValueError, "Lengths must match"): - df.index == index_a - tm.assert_numpy_array_equal(index_a == mi3, - np.array([False, False, False])) + df.index == index - def test_conversion_preserves_name(self): - # GH 10875 - i = pd.Index(['01:02:03', '01:02:04'], name='label') - assert i.name == pd.to_datetime(i).name - assert i.name == pd.to_timedelta(i).name + def test_equals_op_index_vs_mi_same_length(self): + mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]) + index = Index(['foo', 'bar', 'baz']) - def test_string_index_repr(self): - # py3/py2 repr can differ because of "u" prefix - # which also affects to displayed element size + result = mi == index + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(result, expected) - if PY3: - coerce = lambda x: x - else: - coerce = unicode # noqa + @pytest.mark.parametrize("dt_conv", [ + pd.to_datetime, pd.to_timedelta]) + def test_dt_conversion_preserves_name(self, dt_conv): + # GH 10875 + index = pd.Index(['01:02:03', '01:02:04'], name='label') + assert index.name == dt_conv(index).name + @pytest.mark.skipif(not PY3, reason="compat test") + @pytest.mark.parametrize("index,expected", [ + # ASCII # short - idx = pd.Index(['a', 'bb', 'ccc']) - if PY3: - expected = u"""Index(['a', 'bb', 'ccc'], dtype='object')""" - assert repr(idx) == expected - else: - expected = u"""Index([u'a', u'bb', u'ccc'], dtype='object')""" - assert coerce(idx) == expected - + (pd.Index(['a', 'bb', 'ccc']), + u"""Index(['a', 'bb', 'ccc'], dtype='object')"""), # multiple lines - idx = pd.Index(['a', 'bb', 'ccc'] * 10) - if PY3: - expected = u"""\ + (pd.Index(['a', 'bb', 'ccc'] * 10), + u"""\ Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], - dtype='object')""" - - assert repr(idx) == expected - else: - expected = u"""\ -Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', - u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', - u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'], - dtype='object')""" - - assert coerce(idx) == expected - + dtype='object')"""), # truncated - idx = pd.Index(['a', 'bb', 'ccc'] * 100) - if PY3: - expected = u"""\ + (pd.Index(['a', 'bb', 'ccc'] * 100), + u"""\ Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', ... 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], - dtype='object', length=300)""" + dtype='object', length=300)"""), - assert repr(idx) == expected - else: - expected = u"""\ + # Non-ASCII + # short + (pd.Index([u'あ', u'いい', u'ううう']), + u"""Index(['あ', 'いい', 'ううう'], dtype='object')"""), + # multiple lines + (pd.Index([u'あ', u'いい', u'ううう'] * 10), + (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " + u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " + u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう'],\n" + u" dtype='object')")), + # truncated + (pd.Index([u'あ', u'いい', u'ううう'] * 100), + (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " + u"'あ', 'いい', 'ううう', 'あ',\n" + u" ...\n" + u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう', 'あ', 'いい', 'ううう'],\n" + u" dtype='object', length=300)"))]) + def test_string_index_repr(self, index, expected): + result = repr(index) + assert result == expected + + @pytest.mark.skipif(PY3, reason="compat test") + @pytest.mark.parametrize("index,expected", [ + # ASCII + # short + (pd.Index(['a', 'bb', 'ccc']), + u"""Index([u'a', u'bb', u'ccc'], dtype='object')"""), + # multiple lines + (pd.Index(['a', 'bb', 'ccc'] * 10), + u"""\ +Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', 
u'ccc', u'a', + u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', + u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'], + dtype='object')"""), + # truncated + (pd.Index(['a', 'bb', 'ccc'] * 100), + u"""\ Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', ... u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'], - dtype='object', length=300)""" - - assert coerce(idx) == expected + dtype='object', length=300)"""), + # Non-ASCII # short - idx = pd.Index([u'あ', u'いい', u'ううう']) - if PY3: - expected = u"""Index(['あ', 'いい', 'ううう'], dtype='object')""" - assert repr(idx) == expected - else: - expected = u"""Index([u'あ', u'いい', u'ううう'], dtype='object')""" - assert coerce(idx) == expected - + (pd.Index([u'あ', u'いい', u'ううう']), + u"""Index([u'あ', u'いい', u'ううう'], dtype='object')"""), # multiple lines - idx = pd.Index([u'あ', u'いい', u'ううう'] * 10) - if PY3: - expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " - u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " - u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう'],\n" - u" dtype='object')") - assert repr(idx) == expected - else: - expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" - u" u'いい', u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " - u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" - u" dtype='object')") - assert coerce(idx) == expected - + (pd.Index([u'あ', u'いい', u'ううう'] * 10), + (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " + u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" + u" u'いい', u'ううう', u'あ', u'いい', u'ううう', " + u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n" + u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " + u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" + u" dtype='object')")), # truncated - idx = pd.Index([u'あ', u'いい', u'ううう'] * 100) - if PY3: - expected = (u"Index(['あ', 'いい', 
'ううう', 'あ', 'いい', 'ううう', " - u"'あ', 'いい', 'ううう', 'あ',\n" - u" ...\n" - u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう', 'あ', 'いい', 'ううう'],\n" - u" dtype='object', length=300)") - assert repr(idx) == expected - else: - expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" - u" ...\n" - u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " - u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" - u" dtype='object', length=300)") - - assert coerce(idx) == expected + (pd.Index([u'あ', u'いい', u'ううう'] * 100), + (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " + u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" + u" ...\n" + u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " + u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" + u" dtype='object', length=300)"))]) + def test_string_index_repr_compat(self, index, expected): + result = unicode(index) # noqa + assert result == expected - # Emable Unicode option ----------------------------------------- + @pytest.mark.skipif(not PY3, reason="compat test") + @pytest.mark.parametrize("index,expected", [ + # short + (pd.Index([u'あ', u'いい', u'ううう']), + (u"Index(['あ', 'いい', 'ううう'], " + u"dtype='object')")), + # multiple lines + (pd.Index([u'あ', u'いい', u'ううう'] * 10), + (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ', 'いい', 'ううう'],\n" + u" dtype='object')""")), + # truncated + (pd.Index([u'あ', u'いい', u'ううう'] * 100), + (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', " + u"'ううう', 'あ', 'いい', 'ううう',\n" + u" 'あ',\n" + u" ...\n" + u" 'ううう', 'あ', 'いい', 'ううう', 'あ', " + u"'いい', 'ううう', 'あ', 'いい',\n" + u" 'ううう'],\n" + u" dtype='object', length=300)"))]) + def test_string_index_repr_with_unicode_option(self, index, expected): + # Enable Unicode option ----------------------------------------- with cf.option_context('display.unicode.east_asian_width', True): + result = repr(index) + assert result == 
expected - # short - idx = pd.Index([u'あ', u'いい', u'ううう']) - if PY3: - expected = (u"Index(['あ', 'いい', 'ううう'], " - u"dtype='object')") - assert repr(idx) == expected - else: - expected = (u"Index([u'あ', u'いい', u'ううう'], " - u"dtype='object')") - assert coerce(idx) == expected - - # multiple lines - idx = pd.Index([u'あ', u'いい', u'ううう'] * 10) - if PY3: - expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ', 'いい', 'ううう'],\n" - u" dtype='object')""") - - assert repr(idx) == expected - else: - expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう', u'あ',\n" - u" u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう'],\n" - u" dtype='object')") - - assert coerce(idx) == expected - - # truncated - idx = pd.Index([u'あ', u'いい', u'ううう'] * 100) - if PY3: - expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', " - u"'ううう', 'あ', 'いい', 'ううう',\n" - u" 'あ',\n" - u" ...\n" - u" 'ううう', 'あ', 'いい', 'ううう', 'あ', " - u"'いい', 'ううう', 'あ', 'いい',\n" - u" 'ううう'],\n" - u" dtype='object', length=300)") - - assert repr(idx) == expected - else: - expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " - u"u'ううう', u'あ', u'いい',\n" - u" u'ううう', u'あ',\n" - u" ...\n" - u" u'ううう', u'あ', u'いい', u'ううう', " - u"u'あ', u'いい', u'ううう', u'あ',\n" - u" u'いい', u'ううう'],\n" - u" dtype='object', length=300)") - - assert coerce(idx) == expected + @pytest.mark.skipif(PY3, reason="compat test") + @pytest.mark.parametrize("index,expected", [ + # short + (pd.Index([u'あ', u'いい', u'ううう']), + (u"Index([u'あ', u'いい', u'ううう'], " + u"dtype='object')")), + # multiple lines + (pd.Index([u'あ', u'いい', u'ううう'] * 10), + (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " + u"u'ううう', u'あ', u'いい',\n" + u" u'ううう', u'あ', 
u'いい', u'ううう', " + u"u'あ', u'いい', u'ううう', u'あ',\n" + u" u'いい', u'ううう', u'あ', u'いい', " + u"u'ううう', u'あ', u'いい',\n" + u" u'ううう', u'あ', u'いい', u'ううう', " + u"u'あ', u'いい', u'ううう'],\n" + u" dtype='object')")), + # truncated + (pd.Index([u'あ', u'いい', u'ううう'] * 100), + (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " + u"u'ううう', u'あ', u'いい',\n" + u" u'ううう', u'あ',\n" + u" ...\n" + u" u'ううう', u'あ', u'いい', u'ううう', " + u"u'あ', u'いい', u'ううう', u'あ',\n" + u" u'いい', u'ううう'],\n" + u" dtype='object', length=300)"))]) + def test_string_index_repr_with_unicode_option_compat(self, index, + expected): + # Enable Unicode option ----------------------------------------- + with cf.option_context('display.unicode.east_asian_width', True): + result = unicode(index) # noqa + assert result == expected @pytest.mark.parametrize('dtype', [np.int64, np.float64]) @pytest.mark.parametrize('delta', [1, 0, -1]) def test_addsub_arithmetic(self, dtype, delta): # GH 8142 delta = dtype(delta) - idx = pd.Index([10, 11, 12], dtype=dtype) - result = idx + delta - expected = pd.Index(idx.values + delta, dtype=dtype) + index = pd.Index([10, 11, 12], dtype=dtype) + result = index + delta + expected = pd.Index(index.values + delta, dtype=dtype) tm.assert_index_equal(result, expected) # this subtraction used to fail - result = idx - delta - expected = pd.Index(idx.values - delta, dtype=dtype) + result = index - delta + expected = pd.Index(index.values - delta, dtype=dtype) tm.assert_index_equal(result, expected) - tm.assert_index_equal(idx + idx, 2 * idx) - tm.assert_index_equal(idx - idx, 0 * idx) - assert not (idx - idx).empty + tm.assert_index_equal(index + index, 2 * index) + tm.assert_index_equal(index - index, 0 * index) + assert not (index - index).empty def test_iadd_preserves_name(self): # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name @@ -2075,14 +2079,14 @@ def test_iadd_preserves_name(self): assert ser.index.name == "foo" def test_cached_properties_not_settable(self): - idx = 
pd.Index([1, 2, 3]) + index = pd.Index([1, 2, 3]) with tm.assert_raises_regex(AttributeError, "Can't set attribute"): - idx.is_unique = False + index.is_unique = False def test_get_duplicates_deprecated(self): - idx = pd.Index([1, 2, 3]) + index = pd.Index([1, 2, 3]) with tm.assert_produces_warning(FutureWarning): - idx.get_duplicates() + index.get_duplicates() class TestMixedIntIndex(Base): @@ -2100,43 +2104,42 @@ def create_index(self): return self.mixedIndex def test_argsort(self): - idx = self.create_index() + index = self.create_index() if PY36: with tm.assert_raises_regex(TypeError, "'>|<' not supported"): - result = idx.argsort() + result = index.argsort() elif PY3: with tm.assert_raises_regex(TypeError, "unorderable types"): - result = idx.argsort() + result = index.argsort() else: - result = idx.argsort() - expected = np.array(idx).argsort() + result = index.argsort() + expected = np.array(index).argsort() tm.assert_numpy_array_equal(result, expected, check_dtype=False) def test_numpy_argsort(self): - idx = self.create_index() + index = self.create_index() if PY36: with tm.assert_raises_regex(TypeError, "'>|<' not supported"): - result = np.argsort(idx) + result = np.argsort(index) elif PY3: with tm.assert_raises_regex(TypeError, "unorderable types"): - result = np.argsort(idx) + result = np.argsort(index) else: - result = np.argsort(idx) - expected = idx.argsort() + result = np.argsort(index) + expected = index.argsort() tm.assert_numpy_array_equal(result, expected) def test_copy_name(self): # Check that "name" argument passed at initialization is honoured # GH12309 - idx = self.create_index() + index = self.create_index() - first = idx.__class__(idx, copy=True, name='mario') + first = index.__class__(index, copy=True, name='mario') second = first.__class__(first, copy=False) # Even though "copy=False", we want a new object. 
assert first is not second - # Not using tm.assert_index_equal() since names differ: - assert idx.equals(first) + tm.assert_index_equal(first, second) assert first.name == 'mario' assert second.name == 'mario' @@ -2154,77 +2157,85 @@ def test_copy_name(self): def test_copy_name2(self): # Check that adding a "name" parameter to the copy is honored # GH14302 - idx = pd.Index([1, 2], name='MyName') - idx1 = idx.copy() - - assert idx.equals(idx1) - assert idx.name == 'MyName' - assert idx1.name == 'MyName' - - idx2 = idx.copy(name='NewName') + index = pd.Index([1, 2], name='MyName') + index1 = index.copy() - assert idx.equals(idx2) - assert idx.name == 'MyName' - assert idx2.name == 'NewName' + tm.assert_index_equal(index, index1) - idx3 = idx.copy(names=['NewName']) + index2 = index.copy(name='NewName') + tm.assert_index_equal(index, index2, check_names=False) + assert index.name == 'MyName' + assert index2.name == 'NewName' - assert idx.equals(idx3) - assert idx.name == 'MyName' - assert idx.names == ['MyName'] - assert idx3.name == 'NewName' - assert idx3.names == ['NewName'] + index3 = index.copy(names=['NewName']) + tm.assert_index_equal(index, index3, check_names=False) + assert index.name == 'MyName' + assert index.names == ['MyName'] + assert index3.name == 'NewName' + assert index3.names == ['NewName'] def test_union_base(self): - idx = self.create_index() - first = idx[3:] - second = idx[:5] + index = self.create_index() + first = index[3:] + second = index[:5] if PY3: - with tm.assert_produces_warning(RuntimeWarning): - # unorderable types - result = first.union(second) - expected = Index(['b', 2, 'c', 0, 'a', 1]) - tm.assert_index_equal(result, expected) + # unorderable types + warn_type = RuntimeWarning else: + warn_type = None + + with tm.assert_produces_warning(warn_type): result = first.union(second) - expected = Index(['b', 2, 'c', 0, 'a', 1]) - tm.assert_index_equal(result, expected) + expected = Index(['b', 2, 'c', 0, 'a', 1]) + 
tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("klass", [ + np.array, Series, list]) + def test_union_different_type_base(self, klass): # GH 10149 - cases = [klass(second.values) - for klass in [np.array, Series, list]] - for case in cases: - if PY3: - with tm.assert_produces_warning(RuntimeWarning): - # unorderable types - result = first.union(case) - assert tm.equalContents(result, idx) - else: - result = first.union(case) - assert tm.equalContents(result, idx) + index = self.create_index() + first = index[3:] + second = index[:5] + + if PY3: + # unorderable types + warn_type = RuntimeWarning + else: + warn_type = None + + with tm.assert_produces_warning(warn_type): + result = first.union(klass(second.values)) + + assert tm.equalContents(result, index) def test_intersection_base(self): # (same results for py2 and py3 but sortedness not tested elsewhere) - idx = self.create_index() - first = idx[:5] - second = idx[:3] + index = self.create_index() + first = index[:5] + second = index[:3] + result = first.intersection(second) expected = Index([0, 'a', 1]) tm.assert_index_equal(result, expected) + @pytest.mark.parametrize("klass", [ + np.array, Series, list]) + def test_intersection_different_type_base(self, klass): # GH 10149 - cases = [klass(second.values) - for klass in [np.array, Series, list]] - for case in cases: - result = first.intersection(case) - assert tm.equalContents(result, second) + index = self.create_index() + first = index[:5] + second = index[:3] + + result = first.intersection(klass(second.values)) + assert tm.equalContents(result, second) def test_difference_base(self): # (same results for py2 and py3 but sortedness not tested elsewhere) - idx = self.create_index() - first = idx[:4] - second = idx[3:] + index = self.create_index() + first = index[:4] + second = index[3:] result = first.difference(second) expected = Index([0, 1, 'a']) @@ -2232,103 +2243,102 @@ def test_difference_base(self): def 
test_symmetric_difference(self): # (same results for py2 and py3 but sortedness not tested elsewhere) - idx = self.create_index() - first = idx[:4] - second = idx[3:] + index = self.create_index() + first = index[:4] + second = index[3:] result = first.symmetric_difference(second) expected = Index([0, 1, 2, 'a', 'c']) tm.assert_index_equal(result, expected) def test_logical_compat(self): - idx = self.create_index() - assert idx.all() == idx.values.all() - assert idx.any() == idx.values.any() - - def test_dropna(self): + index = self.create_index() + assert index.all() == index.values.all() + assert index.any() == index.values.any() + + @pytest.mark.parametrize("how", ['any', 'all']) + @pytest.mark.parametrize("dtype", [ + None, object, 'category']) + @pytest.mark.parametrize("vals,expected", [ + ([1, 2, 3], [1, 2, 3]), ([1., 2., 3.], [1., 2., 3.]), + ([1., 2., np.nan, 3.], [1., 2., 3.]), + (['A', 'B', 'C'], ['A', 'B', 'C']), + (['A', np.nan, 'B', 'C'], ['A', 'B', 'C'])]) + def test_dropna(self, how, dtype, vals, expected): # GH 6194 - for dtype in [None, object, 'category']: - idx = pd.Index([1, 2, 3], dtype=dtype) - tm.assert_index_equal(idx.dropna(), idx) - - idx = pd.Index([1., 2., 3.], dtype=dtype) - tm.assert_index_equal(idx.dropna(), idx) - nanidx = pd.Index([1., 2., np.nan, 3.], dtype=dtype) - tm.assert_index_equal(nanidx.dropna(), idx) - - idx = pd.Index(['A', 'B', 'C'], dtype=dtype) - tm.assert_index_equal(idx.dropna(), idx) - nanidx = pd.Index(['A', np.nan, 'B', 'C'], dtype=dtype) - tm.assert_index_equal(nanidx.dropna(), idx) - - tm.assert_index_equal(nanidx.dropna(how='any'), idx) - tm.assert_index_equal(nanidx.dropna(how='all'), idx) - - idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03']) - tm.assert_index_equal(idx.dropna(), idx) - nanidx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', - '2011-01-03', pd.NaT]) - tm.assert_index_equal(nanidx.dropna(), idx) - - idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days']) - 
tm.assert_index_equal(idx.dropna(), idx) - nanidx = pd.TimedeltaIndex([pd.NaT, '1 days', '2 days', - '3 days', pd.NaT]) - tm.assert_index_equal(nanidx.dropna(), idx) - - idx = pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M') - tm.assert_index_equal(idx.dropna(), idx) - nanidx = pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'], - freq='M') - tm.assert_index_equal(nanidx.dropna(), idx) + index = pd.Index(vals, dtype=dtype) + result = index.dropna(how=how) + expected = pd.Index(expected, dtype=dtype) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("how", ['any', 'all']) + @pytest.mark.parametrize("index,expected", [ + (pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03']), + pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])), + (pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', pd.NaT]), + pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])), + (pd.TimedeltaIndex(['1 days', '2 days', '3 days']), + pd.TimedeltaIndex(['1 days', '2 days', '3 days'])), + (pd.TimedeltaIndex([pd.NaT, '1 days', '2 days', '3 days', pd.NaT]), + pd.TimedeltaIndex(['1 days', '2 days', '3 days'])), + (pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'), + pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')), + (pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'], freq='M'), + pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'))]) + def test_dropna_dt_like(self, how, index, expected): + result = index.dropna(how=how) + tm.assert_index_equal(result, expected) + def test_dropna_invalid_how_raises(self): msg = "invalid how option: xxx" with tm.assert_raises_regex(ValueError, msg): pd.Index([1, 2, 3]).dropna(how='xxx') def test_get_combined_index(self): result = _get_combined_index([]) - tm.assert_index_equal(result, Index([])) + expected = Index([]) + tm.assert_index_equal(result, expected) def test_repeat(self): repeats = 2 - idx = pd.Index([1, 2, 3]) + index = pd.Index([1, 2, 3]) expected = 
pd.Index([1, 1, 2, 2, 3, 3]) - result = idx.repeat(repeats) + result = index.repeat(repeats) tm.assert_index_equal(result, expected) + def test_repeat_warns_n_keyword(self): + index = pd.Index([1, 2, 3]) + expected = pd.Index([1, 1, 2, 2, 3, 3]) + with tm.assert_produces_warning(FutureWarning): - result = idx.repeat(n=repeats) - tm.assert_index_equal(result, expected) + result = index.repeat(n=2) - def test_is_monotonic_na(self): - examples = [pd.Index([np.nan]), - pd.Index([np.nan, 1]), - pd.Index([1, 2, np.nan]), - pd.Index(['a', 'b', np.nan]), - pd.to_datetime(['NaT']), - pd.to_datetime(['NaT', '2000-01-01']), - pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']), - pd.to_timedelta(['1 day', 'NaT']), ] - for index in examples: - assert not index.is_monotonic_increasing - assert not index.is_monotonic_decreasing - assert not index._is_strictly_monotonic_increasing - assert not index._is_strictly_monotonic_decreasing + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("index", [ + pd.Index([np.nan]), pd.Index([np.nan, 1]), + pd.Index([1, 2, np.nan]), pd.Index(['a', 'b', np.nan]), + pd.to_datetime(['NaT']), pd.to_datetime(['NaT', '2000-01-01']), + pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']), + pd.to_timedelta(['1 day', 'NaT'])]) + def test_is_monotonic_na(self, index): + assert not index.is_monotonic_increasing + assert not index.is_monotonic_decreasing + assert not index._is_strictly_monotonic_increasing + assert not index._is_strictly_monotonic_decreasing def test_repr_summary(self): with cf.option_context('display.max_seq_items', 10): - r = repr(pd.Index(np.arange(1000))) - assert len(r) < 200 - assert "..." in r + result = repr(pd.Index(np.arange(1000))) + assert len(result) < 200 + assert "..." 
in result - def test_int_name_format(self): + @pytest.mark.parametrize("klass", [Series, DataFrame]) + def test_int_name_format(self, klass): index = Index(['a', 'b', 'c'], name=0) - s = Series(lrange(3), index) - df = DataFrame(lrange(3), index=index) - repr(s) - repr(df) + result = klass(lrange(3), index=index) + assert '0' in repr(result) def test_print_unicode_columns(self): df = pd.DataFrame({u("\u05d0"): [1, 2, 3], @@ -2336,29 +2346,27 @@ def test_print_unicode_columns(self): "c": [7, 8, 9]}) repr(df.columns) # should not raise UnicodeDecodeError - def test_unicode_string_with_unicode(self): - idx = Index(lrange(1000)) - - if PY3: - str(idx) - else: - text_type(idx) + @pytest.mark.parametrize("func,compat_func", [ + (str, text_type), # unicode string + (bytes, str) # byte string + ]) + def test_with_unicode(self, func, compat_func): + index = Index(lrange(1000)) - def test_bytestring_with_unicode(self): - idx = Index(lrange(1000)) if PY3: - bytes(idx) + func(index) else: - str(idx) + compat_func(index) def test_intersect_str_dates(self): dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] - i1 = Index(dt_dates, dtype=object) - i2 = Index(['aa'], dtype=object) - res = i2.intersection(i1) + index1 = Index(dt_dates, dtype=object) + index2 = Index(['aa'], dtype=object) + result = index2.intersection(index1) - assert len(res) == 0 + expected = Index([], dtype=object) + tm.assert_index_equal(result, expected) @pytest.mark.parametrize('op', [operator.eq, operator.ne, operator.gt, operator.ge, @@ -2413,8 +2421,8 @@ def test_generated_op_names(opname, indices): assert method.__name__ == opname -@pytest.mark.parametrize('idx_maker', tm.index_subclass_makers_generator()) -def test_index_subclass_constructor_wrong_kwargs(idx_maker): +@pytest.mark.parametrize('index_maker', tm.index_subclass_makers_generator()) +def test_index_subclass_constructor_wrong_kwargs(index_maker): # GH #19348 with tm.assert_raises_regex(TypeError, 'unexpected keyword argument'): - 
idx_maker(foo='bar') + index_maker(foo='bar')
progress towards #20812 This should be it for parametrizing tests. Plan is to look at and clean up fixtures after this
https://api.github.com/repos/pandas-dev/pandas/pulls/20979
2018-05-08T16:27:54Z
2018-05-09T10:22:39Z
2018-05-09T10:22:39Z
2018-05-09T10:25:48Z
COMPAT: 32-bit indexing compat
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 858d08d73e603..2c40be17ce781 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1318,7 +1318,7 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False): (indexer, missing) = labels.get_indexer_non_unique(objarr) # 'indexer' has dupes, create 'check' using 'missing' - check = np.zeros(len(objarr)) + check = np.zeros(len(objarr), dtype=np.intp) check[missing] = -1 mask = check == -1 @@ -2469,7 +2469,7 @@ def maybe_convert_indices(indices, n): if len(indices) == 0: # If list is empty, np.array will return float and cause indexing # errors. - return np.empty(0, dtype=np.int_) + return np.empty(0, dtype=np.intp) mask = indices < 0 if mask.any():
xref #20939
https://api.github.com/repos/pandas-dev/pandas/pulls/20977
2018-05-08T10:28:40Z
2018-05-08T11:39:48Z
2018-05-08T11:39:48Z
2018-05-08T11:40:13Z
BUG: Fix drop_duplicates failure when DataFrame has no column
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ffb2ad046158f..a0559cef6831d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4354,6 +4354,9 @@ def duplicated(self, subset=None, keep='first'): from pandas.core.sorting import get_group_index from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT + if self.columns.empty: + return Series() + def f(vals): labels, shape = algorithms.factorize( vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index d1a4a5f615b86..66a5c181b1369 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1522,6 +1522,16 @@ def test_drop_duplicates_with_duplicate_column_names(self): expected1 = df[:2] tm.assert_frame_equal(result1, expected1) + def test_drop_duplicates_with_no_column(self): + # GH20516 + df0 = DataFrame() + result0 = df0.drop_duplicates() + tm.assert_frame_equal(result0, df0) + + df1 = DataFrame(index=[1, 2]) + result1 = df1.drop_duplicates() + tm.assert_frame_equal(result1, df1) + def test_drop_duplicates_for_take_all(self): df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar', 'foo', 'bar', 'qux', 'foo'],
- [x] closes #20516 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20974
2018-05-07T15:26:52Z
2018-08-17T10:29:21Z
null
2018-08-17T10:29:21Z
CLN: simplify combine_first
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ffb2ad046158f..d43fb95a70555 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -43,7 +43,6 @@ is_extension_array_dtype, is_datetimetz, is_datetime64_any_dtype, - is_datetime64tz_dtype, is_bool_dtype, is_integer_dtype, is_float_dtype, @@ -52,7 +51,6 @@ is_dtype_equal, needs_i8_conversion, _get_dtype_from_object, - _ensure_float, _ensure_float64, _ensure_int64, _ensure_platform_int, @@ -4887,20 +4885,7 @@ def combine(self, other, func, fill_value=None, overwrite=True): else: arr = func(series, otherSeries) - if do_fill: - arr = _ensure_float(arr) - arr[this_mask & other_mask] = np.nan - - # try to downcast back to the original dtype - if needs_i8_conversion_i: - # ToDo: This conversion should be handled in - # _maybe_cast_to_datetime but the change affects lot... - if is_datetime64tz_dtype(new_dtype): - arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz) - else: - arr = maybe_cast_to_datetime(arr, new_dtype) - else: - arr = maybe_downcast_to_dtype(arr, this_dtype) + arr = maybe_downcast_to_dtype(arr, this_dtype) result[col] = arr
https://api.github.com/repos/pandas-dev/pandas/pulls/20972
2018-05-07T10:26:28Z
2018-05-08T00:18:57Z
2018-05-08T00:18:57Z
2018-05-08T00:19:31Z
BUG: Fix isna cannot handle ambiguous typed list
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 05e0028047941..078a733a67a3e 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1364,6 +1364,7 @@ Reshaping - Bug in :func:`cut` and :func:`qcut` where timezone information was dropped (:issue:`19872`) - Bug in :class:`Series` constructor with a ``dtype=str``, previously raised in some cases (:issue:`19853`) - Bug in :func:`get_dummies`, and :func:`select_dtypes`, where duplicate column names caused incorrect behavior (:issue:`20848`) +- Bug in :func:`isna`, which cannot handle ambiguous typed lists (:issue:`20675`) Other ^^^^^ diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 3b2336bf19547..d9dc73434f5ac 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -120,7 +120,9 @@ def _isna_new(obj): return _isna_ndarraylike(obj) elif isinstance(obj, ABCGeneric): return obj._constructor(obj._data.isna(func=isna)) - elif isinstance(obj, list) or hasattr(obj, '__array__'): + elif isinstance(obj, list): + return _isna_ndarraylike(np.asarray(obj, dtype=object)) + elif hasattr(obj, '__array__'): return _isna_ndarraylike(np.asarray(obj)) else: return obj is None @@ -146,7 +148,9 @@ def _isna_old(obj): return _isna_ndarraylike_old(obj) elif isinstance(obj, ABCGeneric): return obj._constructor(obj._data.isna(func=_isna_old)) - elif isinstance(obj, list) or hasattr(obj, '__array__'): + elif isinstance(obj, list): + return _isna_ndarraylike_old(np.asarray(obj, dtype=object)) + elif hasattr(obj, '__array__'): return _isna_ndarraylike_old(np.asarray(obj)) else: return obj is None diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index 365d8d762d673..ca9a2dc81fcc6 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -118,6 +118,11 @@ def test_isna_lists(self): exp = np.array([False, False]) tm.assert_numpy_array_equal(result, exp) 
+ # GH20675 + result = isna([np.NaN, 'world']) + exp = np.array([True, False]) + tm.assert_numpy_array_equal(result, exp) + def test_isna_nat(self): result = isna([NaT]) exp = np.array([True])
- [x] closes #20675 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20971
2018-05-07T00:34:35Z
2018-05-08T00:06:50Z
2018-05-08T00:06:50Z
2018-05-08T00:06:55Z
Sharey keyword for boxplot
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 97a5975dad9a6..5a1bcce9b5970 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -48,22 +48,26 @@ Bug Fixes ~~~~~~~~~ Groupby/Resample/Rolling +~~~~~~~~~~~~~~~~~~~~~~~~ - Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`) - Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`) - Bug in :func:`pandas.core.groupby.GroupBy.rank` where results did not scale to 100% when specifying ``method='dense'`` and ``pct=True`` Data-type specific +~~~~~~~~~~~~~~~~~~ - Bug in :meth:`Series.str.replace()` where the method throws `TypeError` on Python 3.5.2 (:issue: `21078`) - Bug in :class:`Timedelta`: where passing a float with a unit would prematurely round the float precision (:issue: `14156`) - Bug in :func:`pandas.testing.assert_index_equal` which raised ``AssertionError`` incorrectly, when comparing two :class:`CategoricalIndex` objects with param ``check_categorical=False`` (:issue:`19776`) Sparse +~~~~~~ - Bug in :attr:`SparseArray.shape` which previously only returned the shape :attr:`SparseArray.sp_values` (:issue:`21126`) Indexing +~~~~~~~~ - Bug in :meth:`Series.reset_index` where appropriate error was not raised with an invalid level name (:issue:`20925`) - Bug in :func:`interval_range` when ``start``/``periods`` or ``end``/``periods`` are specified with float ``start`` or ``end`` (:issue:`21161`) @@ -71,17 +75,26 @@ Indexing - Bug in :class:`IntervalIndex` constructors where creating an ``IntervalIndex`` from categorical data was not fully supported (:issue:`21243`, issue:`21253`) - Bug in :meth:`MultiIndex.sort_index` which was not guaranteed to sort 
correctly with ``level=1``; this was also causing data misalignment in particular :meth:`DataFrame.stack` operations (:issue:`20994`, :issue:`20945`, :issue:`21052`) +Plotting +~~~~~~~~ + +- New keywords (sharex, sharey) to turn on/off sharing of x/y-axis by subplots generated with pandas.DataFrame().groupby().boxplot() (:issue: `20968`) + I/O +~~~ - Bug in IO methods specifying ``compression='zip'`` which produced uncompressed zip archives (:issue:`17778`, :issue:`21144`) - Bug in :meth:`DataFrame.to_stata` which prevented exporting DataFrames to buffers and most file-like objects (:issue:`21041`) - Bug in :meth:`read_stata` and :class:`StataReader` which did not correctly decode utf-8 strings on Python 3 from Stata 14 files (dta version 118) (:issue:`21244`) + Reshaping +~~~~~~~~~ - Bug in :func:`concat` where error was raised in concatenating :class:`Series` with numpy scalar and tuple names (:issue:`21015`) - Bug in :func:`concat` warning message providing the wrong guidance for future behavior (:issue:`21101`) Other +~~~~~ - Tab completion on :class:`Index` in IPython no longer outputs deprecation warnings (:issue:`21125`) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index c555991ab01c0..8c713548d1ede 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2548,7 +2548,7 @@ def plot_group(group, ax): def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, rot=0, grid=True, ax=None, figsize=None, - layout=None, **kwds): + layout=None, sharex=False, sharey=True, **kwds): """ Make box plots from DataFrameGroupBy data. @@ -2567,6 +2567,14 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, figsize : A tuple (width, height) in inches layout : tuple (optional) (rows, columns) for the layout of the plot + sharex : bool, default False + Whether x-axes will be shared among subplots + + .. 
versionadded:: 0.23.1 + sharey : bool, default True + Whether y-axes will be shared among subplots + + .. versionadded:: 0.23.1 `**kwds` : Keyword Arguments All other plotting keyword arguments to be passed to matplotlib's boxplot function @@ -2598,7 +2606,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, if subplots is True: naxes = len(grouped) fig, axes = _subplots(naxes=naxes, squeeze=False, - ax=ax, sharex=False, sharey=True, + ax=ax, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout) axes = _flatten(axes) diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index ac02f5f4e4283..101713b06df8c 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -40,6 +40,14 @@ def setup_method(self, method): "C": np.arange(20) + np.random.uniform( size=20)}) + def _assert_ytickslabels_visibility(self, axes, expected): + for ax, exp in zip(axes, expected): + self._check_visible(ax.get_yticklabels(), visible=exp) + + def _assert_xtickslabels_visibility(self, axes, expected): + for ax, exp in zip(axes, expected): + self._check_visible(ax.get_xticklabels(), visible=exp) + @pytest.mark.slow def test_plot(self): df = self.tdf @@ -367,6 +375,57 @@ def test_subplots(self): for ax in axes: assert ax.get_legend() is None + def test_groupby_boxplot_sharey(self): + # https://github.com/pandas-dev/pandas/issues/20968 + # sharey can now be switched check whether the right + # pair of axes is turned on or off + + df = DataFrame({'a': [-1.43, -0.15, -3.70, -1.43, -0.14], + 'b': [0.56, 0.84, 0.29, 0.56, 0.85], + 'c': [0, 1, 2, 3, 1]}, + index=[0, 1, 2, 3, 4]) + + # behavior without keyword + axes = df.groupby('c').boxplot() + expected = [True, False, True, False] + self._assert_ytickslabels_visibility(axes, expected) + + # set sharey=True should be identical + axes = df.groupby('c').boxplot(sharey=True) + expected = [True, False, True, False] + 
self._assert_ytickslabels_visibility(axes, expected) + + # sharey=False, all yticklabels should be visible + axes = df.groupby('c').boxplot(sharey=False) + expected = [True, True, True, True] + self._assert_ytickslabels_visibility(axes, expected) + + def test_groupby_boxplot_sharex(self): + # https://github.com/pandas-dev/pandas/issues/20968 + # sharex can now be switched check whether the right + # pair of axes is turned on or off + + df = DataFrame({'a': [-1.43, -0.15, -3.70, -1.43, -0.14], + 'b': [0.56, 0.84, 0.29, 0.56, 0.85], + 'c': [0, 1, 2, 3, 1]}, + index=[0, 1, 2, 3, 4]) + + # behavior without keyword + axes = df.groupby('c').boxplot() + expected = [True, True, True, True] + self._assert_xtickslabels_visibility(axes, expected) + + # set sharex=False should be identical + axes = df.groupby('c').boxplot(sharex=False) + expected = [True, True, True, True] + self._assert_xtickslabels_visibility(axes, expected) + + # sharex=True, yticklabels should be visible + # only for bottom plots + axes = df.groupby('c').boxplot(sharex=True) + expected = [False, False, True, True] + self._assert_xtickslabels_visibility(axes, expected) + @pytest.mark.slow def test_subplots_timeseries(self): idx = date_range(start='2014-07-01', freq='M', periods=10)
…to _subplots(). .. code-block:: jupyter-notebook %pylab inline import pandas as pd N = 100 rand = random.random(N) clas = random.binomial(5,.5, N) df = pd.DataFrame({'Rand': rand-clas, 'Rand2': rand, 'Class': clas}, index= np.arange(N)) df.groupby('Class').boxplot(sharey=True, sharex=False) >>> TypeError: boxplot() got an unexpected keyword argument 'sharey' New Behavior: .. ipython:: jupyter-notebook: ... df.groupby('Class').boxplot(sharey=True, sharex=True) df.groupby('Class').boxplot(sharey=True, sharex=False) df.groupby('Class').boxplot(sharey=False, sharex=True) df.groupby('Class').boxplot(sharey=False, sharex=False) All lead to different behaviour. The sharing of both the x and y axes can be turned on and off separately. To restore previous behavior, use boxplot() without keywords. Default is the previous behavior of sharey - [x] closes #20918 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20968
2018-05-06T17:03:31Z
2018-06-08T11:27:30Z
2018-06-08T11:27:29Z
2018-06-08T16:25:29Z
BUG: Fix wrong khash method definition
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index af1bee435eead..940505c01d6c6 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1254,6 +1254,7 @@ Indexing - Bug in ``.loc`` assignment with a single-element list-like incorrectly assigns as a list (:issue:`19474`) - Bug in partial string indexing on a ``Series/DataFrame`` with a monotonic decreasing ``DatetimeIndex`` (:issue:`19362`) - Bug in :meth:`IntervalIndex.get_loc` and :meth:`IntervalIndex.get_indexer` when used with an :class:`IntervalIndex` containing a single interval (:issue:`17284`, :issue:`20921`) +- Bug in ``.loc`` with a ``uint64`` indexer (:issue:`20722`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/_libs/khash.pxd b/pandas/_libs/khash.pxd index b1d965c3618cd..4c00e273b33b7 100644 --- a/pandas/_libs/khash.pxd +++ b/pandas/_libs/khash.pxd @@ -84,9 +84,9 @@ cdef extern from "khash_python.h": kh_uint64_t* kh_init_uint64() nogil void kh_destroy_uint64(kh_uint64_t*) nogil void kh_clear_uint64(kh_uint64_t*) nogil - khint_t kh_get_uint64(kh_uint64_t*, int64_t) nogil + khint_t kh_get_uint64(kh_uint64_t*, uint64_t) nogil void kh_resize_uint64(kh_uint64_t*, khint_t) nogil - khint_t kh_put_uint64(kh_uint64_t*, int64_t, int*) nogil + khint_t kh_put_uint64(kh_uint64_t*, uint64_t, int*) nogil void kh_del_uint64(kh_uint64_t*, khint_t) nogil bint kh_exist_uint64(kh_uint64_t*, khiter_t) nogil diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 6ccff7e898a6a..2e52154d7679b 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -784,3 +784,22 @@ def convert_nested_indexer(indexer_type, keys): index=pd.MultiIndex.from_product(keys)) tm.assert_series_equal(result, expected) + + def test_loc_uint64(self): + # GH20722 + # Test whether loc accept uint64 max value as index. 
+ s = pd.Series([1, 2], + index=[np.iinfo('uint64').max - 1, + np.iinfo('uint64').max]) + + result = s.loc[np.iinfo('uint64').max - 1] + expected = s.iloc[0] + assert result == expected + + result = s.loc[[np.iinfo('uint64').max - 1]] + expected = s.iloc[[0]] + tm.assert_series_equal(result, expected) + + result = s.loc[[np.iinfo('uint64').max - 1, + np.iinfo('uint64').max]] + tm.assert_series_equal(result, s)
- [x] closes #20722 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20966
2018-05-06T08:01:05Z
2018-05-08T10:35:11Z
2018-05-08T10:35:10Z
2018-05-08T10:35:17Z
BUG: Fix combine_first converts other columns type into floats unexpectedly
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 91575c311b409..83bd8ee9b3c74 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -867,3 +867,4 @@ Other - :meth:`DataFrame.nlargest` and :meth:`DataFrame.nsmallest` now returns the correct n values when keep != 'all' also when tied on the first columns (:issue:`22752`) - :meth:`~pandas.io.formats.style.Styler.bar` now also supports tablewise application (in addition to rowwise and columnwise) with ``axis=None`` and setting clipping range with ``vmin`` and ``vmax`` (:issue:`21548` and :issue:`21526`). ``NaN`` values are also handled properly. - Logical operations ``&, |, ^`` between :class:`Series` and :class:`Index` will no longer raise ``ValueError`` (:issue:`22092`) +- Bug in :meth:`DataFrame.combine_first` in which column types were unexpectedly converted to float (:issue:`20699`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f4b7ccb0fdf5b..6b6d0e9be931d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5072,9 +5072,17 @@ def combine(self, other, func, fill_value=None, overwrite=True): series[this_mask] = fill_value otherSeries[other_mask] = fill_value - # if we have different dtypes, possibly promote - new_dtype = this_dtype - if not is_dtype_equal(this_dtype, other_dtype): + if col not in self.columns: + # If self DataFrame does not have col in other DataFrame, + # try to promote series, which is all NaN, as other_dtype. + new_dtype = other_dtype + try: + series = series.astype(new_dtype, copy=False) + except ValueError: + # e.g. 
new_dtype is integer types + pass + else: + # if we have different dtypes, possibly promote new_dtype = find_common_type([this_dtype, other_dtype]) if not is_dtype_equal(this_dtype, new_dtype): series = series.astype(new_dtype) @@ -5153,6 +5161,11 @@ def combiner(x, y, needs_i8_conversion=False): else: mask = isna(x_values) + # If the column y in other DataFrame is not in first DataFrame, + # just return y_values. + if y.name not in self.columns: + return y_values + return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False) diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py index 15ca65395e4fc..d1f921bc5e894 100644 --- a/pandas/tests/frame/test_combine_concat.py +++ b/pandas/tests/frame/test_combine_concat.py @@ -4,6 +4,7 @@ from datetime import datetime +import pytest import numpy as np from numpy import nan @@ -750,6 +751,17 @@ def test_combine_first_int(self): tm.assert_frame_equal(res, df1) assert res['a'].dtype == 'int64' + @pytest.mark.parametrize("val", [1, 1.0]) + def test_combine_first_with_asymmetric_other(self, val): + # see gh-20699 + df1 = pd.DataFrame({'isNum': [val]}) + df2 = pd.DataFrame({'isBool': [True]}) + + res = df1.combine_first(df2) + exp = pd.DataFrame({'isBool': [True], 'isNum': [val]}) + + tm.assert_frame_equal(res, exp) + def test_concat_datetime_datetime64_frame(self): # #2624 rows = []
- [x] closes #20699 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20965
2018-05-06T06:54:02Z
2018-10-09T12:54:03Z
2018-10-09T12:54:03Z
2018-10-09T12:54:35Z
PERF: removed coercion to int64 in Categorical.from_codes
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index f91782459df67..d540a114749bb 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -578,7 +578,7 @@ def from_codes(cls, codes, categories, ordered=False): unordered. """ try: - codes = np.asarray(codes, np.int64) + codes = np.asarray(codes) except (ValueError, TypeError): raise ValueError( "codes need to be convertible to an arrays of integers")
- [x] closes #18501 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20961
2018-05-05T17:51:17Z
2018-05-05T19:49:59Z
null
2018-05-10T15:37:06Z
DOC: add reshaping visuals to the docs (Reshaping and Pivot Tables)
diff --git a/doc/source/_static/reshaping_melt.png b/doc/source/_static/reshaping_melt.png new file mode 100644 index 0000000000000..d0c4e77655e60 Binary files /dev/null and b/doc/source/_static/reshaping_melt.png differ diff --git a/doc/source/_static/reshaping_pivot.png b/doc/source/_static/reshaping_pivot.png new file mode 100644 index 0000000000000..c6c37a80744d4 Binary files /dev/null and b/doc/source/_static/reshaping_pivot.png differ diff --git a/doc/source/_static/reshaping_stack.png b/doc/source/_static/reshaping_stack.png new file mode 100644 index 0000000000000..924f916ae0d37 Binary files /dev/null and b/doc/source/_static/reshaping_stack.png differ diff --git a/doc/source/_static/reshaping_unstack.png b/doc/source/_static/reshaping_unstack.png new file mode 100644 index 0000000000000..3e14cdd1ee1f7 Binary files /dev/null and b/doc/source/_static/reshaping_unstack.png differ diff --git a/doc/source/_static/reshaping_unstack_0.png b/doc/source/_static/reshaping_unstack_0.png new file mode 100644 index 0000000000000..eceddf73eea9e Binary files /dev/null and b/doc/source/_static/reshaping_unstack_0.png differ diff --git a/doc/source/_static/reshaping_unstack_1.png b/doc/source/_static/reshaping_unstack_1.png new file mode 100644 index 0000000000000..ab0ae3796dcc1 Binary files /dev/null and b/doc/source/_static/reshaping_unstack_1.png differ diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 71ddaa13fdd8a..250a1808e496e 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -60,6 +60,8 @@ To select out everything for variable ``A`` we could do: df[df['variable'] == 'A'] +.. image:: _static/reshaping_pivot.png + But suppose we wish to do time series operations with the variables. A better representation would be where the ``columns`` are the unique variables and an ``index`` of dates identifies individual observations. To reshape the data into @@ -96,10 +98,12 @@ are homogeneously-typed. 
Reshaping by stacking and unstacking ------------------------------------ -Closely related to the :meth:`~DataFrame.pivot` method are the related -:meth:`~DataFrame.stack` and :meth:`~DataFrame.unstack` methods available on -``Series`` and ``DataFrame``. These methods are designed to work together with -``MultiIndex`` objects (see the section on :ref:`hierarchical indexing +.. image:: _static/reshaping_stack.png + +Closely related to the :meth:`~DataFrame.pivot` method are the related +:meth:`~DataFrame.stack` and :meth:`~DataFrame.unstack` methods available on +``Series`` and ``DataFrame``. These methods are designed to work together with +``MultiIndex`` objects (see the section on :ref:`hierarchical indexing <advanced.hierarchical>`). Here are essentially what these methods do: - ``stack``: "pivot" a level of the (possibly hierarchical) column labels, @@ -109,6 +113,8 @@ Closely related to the :meth:`~DataFrame.pivot` method are the related (possibly hierarchical) row index to the column axis, producing a reshaped ``DataFrame`` with a new inner-most level of column labels. +.. image:: _static/reshaping_unstack.png + The clearest way to explain is by example. Let's take a prior example data set from the hierarchical indexing section: @@ -149,6 +155,8 @@ unstacks the **last level**: .. _reshaping.unstack_by_name: +.. image:: _static/reshaping_unstack_1.png + If the indexes have names, you can use the level names instead of specifying the level numbers: @@ -156,6 +164,9 @@ the level numbers: stacked.unstack('second') + +.. image:: _static/reshaping_unstack_0.png + Notice that the ``stack`` and ``unstack`` methods implicitly sort the index levels involved. Hence a call to ``stack`` and then ``unstack``, or vice versa, will result in a **sorted** copy of the original ``DataFrame`` or ``Series``: @@ -266,11 +277,13 @@ the right thing: Reshaping by Melt ----------------- +.. 
image:: _static/reshaping_melt.png + The top-level :func:`~pandas.melt` function and the corresponding :meth:`DataFrame.melt` -are useful to massage a ``DataFrame`` into a format where one or more columns -are *identifier variables*, while all other columns, considered *measured -variables*, are "unpivoted" to the row axis, leaving just two non-identifier -columns, "variable" and "value". The names of those columns can be customized +are useful to massage a ``DataFrame`` into a format where one or more columns +are *identifier variables*, while all other columns, considered *measured +variables*, are "unpivoted" to the row axis, leaving just two non-identifier +columns, "variable" and "value". The names of those columns can be customized by supplying the ``var_name`` and ``value_name`` parameters. For instance, @@ -285,7 +298,7 @@ For instance, cheese.melt(id_vars=['first', 'last']) cheese.melt(id_vars=['first', 'last'], var_name='quantity') -Another way to transform is to use the :func:`~pandas.wide_to_long` panel data +Another way to transform is to use the :func:`~pandas.wide_to_long` panel data convenience function. It is less flexible than :func:`~pandas.melt`, but more user-friendly. @@ -332,8 +345,8 @@ While :meth:`~DataFrame.pivot` provides general purpose pivoting with various data types (strings, numerics, etc.), pandas also provides :func:`~pandas.pivot_table` for pivoting with aggregation of numeric data. -The function :func:`~pandas.pivot_table` can be used to create spreadsheet-style -pivot tables. See the :ref:`cookbook<cookbook.pivot>` for some advanced +The function :func:`~pandas.pivot_table` can be used to create spreadsheet-style +pivot tables. See the :ref:`cookbook<cookbook.pivot>` for some advanced strategies. 
It takes a number of arguments: @@ -485,7 +498,7 @@ using the ``normalize`` argument: pd.crosstab(df.A, df.B, normalize='columns') ``crosstab`` can also be passed a third ``Series`` and an aggregation function -(``aggfunc``) that will be applied to the values of the third ``Series`` within +(``aggfunc``) that will be applied to the values of the third ``Series`` within each group defined by the first two ``Series``: .. ipython:: python @@ -508,8 +521,8 @@ Finally, one can also add margins or normalize this output. Tiling ------ -The :func:`~pandas.cut` function computes groupings for the values of the input -array and is often used to transform continuous variables to discrete or +The :func:`~pandas.cut` function computes groupings for the values of the input +array and is often used to transform continuous variables to discrete or categorical variables: .. ipython:: python @@ -539,8 +552,8 @@ used to bin the passed data.:: Computing indicator / dummy variables ------------------------------------- -To convert a categorical variable into a "dummy" or "indicator" ``DataFrame``, -for example a column in a ``DataFrame`` (a ``Series``) which has ``k`` distinct +To convert a categorical variable into a "dummy" or "indicator" ``DataFrame``, +for example a column in a ``DataFrame`` (a ``Series``) which has ``k`` distinct values, can derive a ``DataFrame`` containing ``k`` columns of 1s and 0s using :func:`~pandas.get_dummies`: @@ -577,7 +590,7 @@ This function is often used along with discretization functions like ``cut``: See also :func:`Series.str.get_dummies <pandas.Series.str.get_dummies>`. :func:`get_dummies` also accepts a ``DataFrame``. By default all categorical -variables (categorical in the statistical sense, those with `object` or +variables (categorical in the statistical sense, those with `object` or `categorical` dtype) are encoded as dummy variables. 
@@ -587,7 +600,7 @@ variables (categorical in the statistical sense, those with `object` or 'C': [1, 2, 3]}) pd.get_dummies(df) -All non-object columns are included untouched in the output. You can control +All non-object columns are included untouched in the output. You can control the columns that are encoded with the ``columns`` keyword. .. ipython:: python @@ -640,7 +653,7 @@ When a column contains only one level, it will be omitted in the result. pd.get_dummies(df, drop_first=True) -By default new columns will have ``np.uint8`` dtype. +By default new columns will have ``np.uint8`` dtype. To choose another dtype, use the``dtype`` argument: .. ipython:: python
- [x] closes #20898 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20960
2018-05-05T16:44:22Z
2018-05-08T20:44:01Z
2018-05-08T20:44:01Z
2018-05-08T20:44:04Z
BUG in .groupby.apply when applying a function that has mixed data types and the user supplied function can fail on the grouping column
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index eb6c212731822..a77620fe6b36b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1320,6 +1320,7 @@ Groupby/Resample/Rolling - Bug in :func:`DataFrame.resample` that dropped timezone information (:issue:`13238`) - Bug in :func:`DataFrame.groupby` where transformations using ``np.all`` and ``np.any`` were raising a ``ValueError`` (:issue:`20653`) - Bug in :func:`DataFrame.resample` where ``ffill``, ``bfill``, ``pad``, ``backfill``, ``fillna``, ``interpolate``, and ``asfreq`` were ignoring ``loffset``. (:issue:`20744`) +- Bug in :func:`DataFrame.groupby` when applying a function that has mixed data types and the user supplied function can fail on the grouping column (:issue:`20949`) Sparse ^^^^^^ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 164d1bebd2929..df7a5dc9dc173 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -6,6 +6,7 @@ import warnings import copy from textwrap import dedent +from contextlib import contextmanager from pandas.compat import ( zip, range, lzip, @@ -549,6 +550,16 @@ def f(self): return attr +@contextmanager +def _group_selection_context(groupby): + """ + set / reset the _group_selection_context + """ + groupby._set_group_selection() + yield groupby + groupby._reset_group_selection() + + class _GroupBy(PandasObject, SelectionMixin): _group_selection = None _apply_whitelist = frozenset([]) @@ -696,26 +707,32 @@ def _reset_group_selection(self): each group regardless of whether a group selection was previously set. """ if self._group_selection is not None: - self._group_selection = None # GH12839 clear cached selection too when changing group selection + self._group_selection = None self._reset_cache('_selected_obj') def _set_group_selection(self): """ Create group based selection. Used when selection is not passed directly but instead via a grouper. 
+ + NOTE: this should be paired with a call to _reset_group_selection """ grp = self.grouper - if self.as_index and getattr(grp, 'groupings', None) is not None and \ - self.obj.ndim > 1: - ax = self.obj._info_axis - groupers = [g.name for g in grp.groupings - if g.level is None and g.in_axis] + if not (self.as_index and + getattr(grp, 'groupings', None) is not None and + self.obj.ndim > 1 and + self._group_selection is None): + return + + ax = self.obj._info_axis + groupers = [g.name for g in grp.groupings + if g.level is None and g.in_axis] - if len(groupers): - self._group_selection = ax.difference(Index(groupers)).tolist() - # GH12839 clear selected obj cache when group selection changes - self._reset_cache('_selected_obj') + if len(groupers): + # GH12839 clear selected obj cache when group selection changes + self._group_selection = ax.difference(Index(groupers)).tolist() + self._reset_cache('_selected_obj') def _set_result_index_ordered(self, result): # set the result index on the passed values object and @@ -781,10 +798,10 @@ def _make_wrapper(self, name): type(self).__name__)) raise AttributeError(msg) - # need to setup the selection - # as are not passed directly but in the grouper self._set_group_selection() + # need to setup the selection + # as are not passed directly but in the grouper f = getattr(self._selected_obj, name) if not isinstance(f, types.MethodType): return self.apply(lambda self: getattr(self, name)) @@ -897,7 +914,22 @@ def f(g): # ignore SettingWithCopy here in case the user mutates with option_context('mode.chained_assignment', None): - return self._python_apply_general(f) + try: + result = self._python_apply_general(f) + except Exception: + + # gh-20949 + # try again, with .apply acting as a filtering + # operation, by excluding the grouping column + # This would normally not be triggered + # except if the udf is trying an operation that + # fails on *some* columns, e.g. 
a numeric operation + # on a string grouper column + + with _group_selection_context(self): + return self._python_apply_general(f) + + return result def _python_apply_general(self, f): keys, values, mutated = self.grouper.apply(f, self._selected_obj, @@ -1275,9 +1307,9 @@ def mean(self, *args, **kwargs): except GroupByError: raise except Exception: # pragma: no cover - self._set_group_selection() - f = lambda x: x.mean(axis=self.axis, **kwargs) - return self._python_agg_general(f) + with _group_selection_context(self): + f = lambda x: x.mean(axis=self.axis, **kwargs) + return self._python_agg_general(f) @Substitution(name='groupby') @Appender(_doc_template) @@ -1293,13 +1325,12 @@ def median(self, **kwargs): raise except Exception: # pragma: no cover - self._set_group_selection() - def f(x): if isinstance(x, np.ndarray): x = Series(x) return x.median(axis=self.axis, **kwargs) - return self._python_agg_general(f) + with _group_selection_context(self): + return self._python_agg_general(f) @Substitution(name='groupby') @Appender(_doc_template) @@ -1336,9 +1367,9 @@ def var(self, ddof=1, *args, **kwargs): if ddof == 1: return self._cython_agg_general('var', **kwargs) else: - self._set_group_selection() f = lambda x: x.var(ddof=ddof, **kwargs) - return self._python_agg_general(f) + with _group_selection_context(self): + return self._python_agg_general(f) @Substitution(name='groupby') @Appender(_doc_template) @@ -1384,6 +1415,7 @@ def f(self, **kwargs): kwargs['numeric_only'] = numeric_only if 'min_count' not in kwargs: kwargs['min_count'] = min_count + self._set_group_selection() try: return self._cython_agg_general( @@ -1453,11 +1485,11 @@ def ohlc(self): @Appender(DataFrame.describe.__doc__) def describe(self, **kwargs): - self._set_group_selection() - result = self.apply(lambda x: x.describe(**kwargs)) - if self.axis == 1: - return result.T - return result.unstack() + with _group_selection_context(self): + result = self.apply(lambda x: x.describe(**kwargs)) + if 
self.axis == 1: + return result.T + return result.unstack() @Substitution(name='groupby') @Appender(_doc_template) @@ -1778,13 +1810,12 @@ def ngroup(self, ascending=True): .cumcount : Number the rows in each group. """ - self._set_group_selection() - - index = self._selected_obj.index - result = Series(self.grouper.group_info[0], index) - if not ascending: - result = self.ngroups - 1 - result - return result + with _group_selection_context(self): + index = self._selected_obj.index + result = Series(self.grouper.group_info[0], index) + if not ascending: + result = self.ngroups - 1 - result + return result @Substitution(name='groupby') def cumcount(self, ascending=True): @@ -1835,11 +1866,10 @@ def cumcount(self, ascending=True): .ngroup : Number the groups themselves. """ - self._set_group_selection() - - index = self._selected_obj.index - cumcounts = self._cumcount_array(ascending=ascending) - return Series(cumcounts, index) + with _group_selection_context(self): + index = self._selected_obj.index + cumcounts = self._cumcount_array(ascending=ascending) + return Series(cumcounts, index) @Substitution(name='groupby') @Appender(_doc_template) @@ -3768,7 +3798,6 @@ def nunique(self, dropna=True): @Appender(Series.describe.__doc__) def describe(self, **kwargs): - self._set_group_selection() result = self.apply(lambda x: x.describe(**kwargs)) if self.axis == 1: return result.T @@ -4411,6 +4440,7 @@ def transform(self, func, *args, **kwargs): return self._transform_general(func, *args, **kwargs) obj = self._obj_with_exclusions + # nuiscance columns if not result.columns.equals(obj.columns): return self._transform_general(func, *args, **kwargs) diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 5ca10fe1af9d1..07eef2d87feb3 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -515,3 +515,16 @@ def test_func(x): index=index2) tm.assert_frame_equal(result1, expected1) 
tm.assert_frame_equal(result2, expected2) + + +def test_apply_with_mixed_types(): + # gh-20949 + df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1, 2, 3], 'C': [4, 6, 5]}) + g = df.groupby('A') + + result = g.transform(lambda x: x / x.sum()) + expected = pd.DataFrame({'B': [1 / 3., 2 / 3., 1], 'C': [0.4, 0.6, 1.0]}) + tm.assert_frame_equal(result, expected) + + result = g.apply(lambda x: x / x.sum()) + tm.assert_frame_equal(result, expected)
closes #20949
https://api.github.com/repos/pandas-dev/pandas/pulls/20959
2018-05-05T14:29:27Z
2018-05-08T00:19:52Z
2018-05-08T00:19:52Z
2018-05-08T00:20:14Z
ENH: Return DatetimeIndex or TimedeltaIndex bins for q/cut when input is datelike
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index dfb7a3675fdd5..abfa57dc09334 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -161,6 +161,7 @@ Datetimelike API Changes - For :class:`DatetimeIndex` and :class:`TimedeltaIndex` with non-``None`` ``freq`` attribute, addition or subtraction of integer-dtyped array or ``Index`` will return an object of the same class (:issue:`19959`) - :class:`DateOffset` objects are now immutable. Attempting to alter one of these will now raise ``AttributeError`` (:issue:`21341`) - :class:`PeriodIndex` subtraction of another ``PeriodIndex`` will now return an object-dtype :class:`Index` of :class:`DateOffset` objects instead of raising a ``TypeError`` (:issue:`20049`) +- :func:`cut` and :func:`qcut` now returns a :class:`DatetimeIndex` or :class:`TimedeltaIndex` bins when the input is datetime or timedelta dtype respectively and ``retbins=True`` (:issue:`19891`) .. _whatsnew_0240.api.other: diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 8bbf939e110e9..863ebc6354136 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -11,6 +11,7 @@ is_datetime64_dtype, is_timedelta64_dtype, is_datetime64tz_dtype, + is_datetime_or_timedelta_dtype, _ensure_int64) import pandas.core.algorithms as algos @@ -18,7 +19,7 @@ from pandas._libs.lib import infer_dtype from pandas import (to_timedelta, to_datetime, Categorical, Timestamp, Timedelta, - Series, Interval, IntervalIndex) + Series, Index, Interval, IntervalIndex) import numpy as np @@ -364,6 +365,8 @@ def _bins_to_cuts(x, bins, right=True, labels=None, result = result.astype(np.float64) np.putmask(result, na_mask, np.nan) + bins = _convert_bin_to_datelike_type(bins, dtype) + return result, bins @@ -428,6 +431,26 @@ def _convert_bin_to_numeric_type(bins, dtype): return bins +def _convert_bin_to_datelike_type(bins, dtype): + """ + Convert bins to a DatetimeIndex or 
TimedeltaIndex if the orginal dtype is + datelike + + Parameters + ---------- + bins : list-like of bins + dtype : dtype of data + + Returns + ------- + bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is + datelike + """ + if is_datetime64tz_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype): + bins = Index(bins.astype(np.int64), dtype=dtype) + return bins + + def _format_labels(bins, precision, right=True, include_lowest=False, dtype=None): """ based on the dtype, return our labels """ diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index 807fb2530603a..44de3e93d42bf 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -7,7 +7,8 @@ import pandas as pd from pandas import (DataFrame, Series, isna, to_datetime, DatetimeIndex, Index, Timestamp, Interval, IntervalIndex, Categorical, - cut, qcut, date_range, NaT, TimedeltaIndex) + cut, qcut, date_range, timedelta_range, NaT, + TimedeltaIndex) from pandas.tseries.offsets import Nano, Day import pandas.util.testing as tm from pandas.api.types import CategoricalDtype as CDT @@ -605,3 +606,38 @@ def f(): mask = result.isna() tm.assert_numpy_array_equal( mask, np.array([False, True, True, True, True])) + + @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Pacific']) + def test_datetime_cut_roundtrip(self, tz): + # GH 19891 + s = Series(date_range('20180101', periods=3, tz=tz)) + result, result_bins = cut(s, 2, retbins=True) + expected = cut(s, result_bins) + tm.assert_series_equal(result, expected) + expected_bins = DatetimeIndex(['2017-12-31 23:57:07.200000', + '2018-01-02 00:00:00', + '2018-01-03 00:00:00']) + expected_bins = expected_bins.tz_localize(tz) + tm.assert_index_equal(result_bins, expected_bins) + + def test_timedelta_cut_roundtrip(self): + # GH 19891 + s = Series(timedelta_range('1day', periods=3)) + result, result_bins = cut(s, 2, retbins=True) + expected = cut(s, result_bins) + tm.assert_series_equal(result, 
expected) + expected_bins = TimedeltaIndex(['0 days 23:57:07.200000', + '2 days 00:00:00', + '3 days 00:00:00']) + tm.assert_index_equal(result_bins, expected_bins) + + @pytest.mark.parametrize('arg, expected_bins', [ + [timedelta_range('1day', periods=3), + TimedeltaIndex(['1 days', '2 days', '3 days'])], + [date_range('20180101', periods=3), + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'])]]) + def test_datelike_qcut_bins(self, arg, expected_bins): + # GH 19891 + s = Series(arg) + result, result_bins = qcut(s, 2, retbins=True) + tm.assert_index_equal(result_bins, expected_bins)
- [x] closes #19891 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20956
2018-05-05T05:29:13Z
2018-07-03T15:34:15Z
2018-07-03T15:34:15Z
2018-07-03T16:41:34Z
Add pip 9.0.3 to conda environment to avoid numpy 1.9.3 error
diff --git a/ci/environment-dev.yaml b/ci/environment-dev.yaml index c3d3d59f895c6..91ee11f73f682 100644 --- a/ci/environment-dev.yaml +++ b/ci/environment-dev.yaml @@ -12,3 +12,4 @@ dependencies: - pytz - setuptools - sphinx + - pip==9.0.3
I obtain this error: 'No matching distribution found for numpy==1.9.3'. See #20952 for more details. This change works for me. Closes #20952 . - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20953
2018-05-04T14:31:15Z
2018-05-04T15:54:51Z
null
2018-05-04T16:33:30Z
CLN: add missing space in package's description
diff --git a/setup.py b/setup.py index a436f451a2a55..6febe674fb2a1 100755 --- a/setup.py +++ b/setup.py @@ -134,7 +134,7 @@ def build_extensions(self): _build_ext.build_extensions(self) -DESCRIPTION = ("Powerful data structures for data analysis, time series," +DESCRIPTION = ("Powerful data structures for data analysis, time series, " "and statistics") LONG_DESCRIPTION = """ **pandas** is a Python package providing fast, flexible, and expressive data
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry The typo fixed here is in the package description, so it shows up in `pip show`. I'd say this fix needs no issue or whatsnew entry; if you feel otherwise, please say so and I'll open an issue and resubmit with a whatsnew entry. ``` $ pip show pandas Name: pandas Version: 0.22.0 Summary: Powerful data structures for data analysis, time series,and statistics Home-page: http://pandas.pydata.org Author: The PyData Development Team Author-email: pydata@googlegroups.com [...] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20950
2018-05-04T11:53:49Z
2018-05-05T12:43:45Z
2018-05-05T12:43:45Z
2018-05-05T12:43:48Z
Allow drop bins when using the cut function
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 6036ef7e221fb..750227cd59f26 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -525,6 +525,7 @@ Other Enhancements library. (:issue:`20564`) - Added new writer for exporting Stata dta files in version 117, ``StataWriter117``. This format supports exporting strings with lengths up to 2,000,000 characters (:issue:`16450`) - :func:`to_hdf` and :func:`read_hdf` now accept an ``errors`` keyword argument to control encoding error handling (:issue:`20835`) +- :func:`cut` has gained the ``duplicates='raise'|'drop'`` option to control whether to raise on duplicated edges (:issue:`20947`) - :func:`date_range` now returns a linearly spaced ``DatetimeIndex`` if ``start``, ``stop``, and ``periods`` are specified, but ``freq`` is not. (:issue:`20808`, :issue:`20983`) .. _whatsnew_0230.api_breaking: diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 118198ea0320d..8bbf939e110e9 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -24,7 +24,7 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, - include_lowest=False): + include_lowest=False, duplicates='raise'): """ Bin values into discrete intervals. @@ -65,6 +65,10 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. + duplicates : {default 'raise', 'drop'}, optional + If bin edges are not unique, raise ValueError or drop non-uniques. + + .. versionadded:: 0.23.0 Returns ------- @@ -85,7 +89,8 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, bins : numpy.ndarray or IntervalIndex. The computed or specified bins. Only returned when `retbins=True`. For scalar or sequence `bins`, this is an ndarray with the computed - bins. 
For an IntervalIndex `bins`, this is equal to `bins`. + bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For + an IntervalIndex `bins`, this is equal to `bins`. See Also -------- @@ -144,6 +149,32 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, dtype: category Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ... + Passing a Series as an input returns a Series with mapping value. + It is used to map numerically to intervals based on bins. + + >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), + ... index=['a', 'b', 'c', 'd', 'e']) + >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False) + ... # doctest: +ELLIPSIS + (a 0.0 + b 1.0 + c 2.0 + d 3.0 + e 4.0 + dtype: float64, array([0, 2, 4, 6, 8])) + + Use `drop` optional when bins is not unique + + >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, + ... right=False, duplicates='drop') + ... # doctest: +ELLIPSIS + (a 0.0 + b 1.0 + c 2.0 + d 3.0 + e 3.0 + dtype: float64, array([0, 2, 4, 6, 8])) + Passing an IntervalIndex for `bins` results in those categories exactly. Notice that values not covered by the IntervalIndex are set to NaN. 
0 is to the left of the first bin (which is closed on the right), and 1.5 @@ -199,7 +230,8 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, - dtype=dtype) + dtype=dtype, + duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name) diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index 8d093f2784ba1..5ea27f9e34e1c 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -4,6 +4,7 @@ import numpy as np from pandas.compat import zip +import pandas as pd from pandas import (DataFrame, Series, isna, to_datetime, DatetimeIndex, Index, Timestamp, Interval, IntervalIndex, Categorical, cut, qcut, date_range, NaT, TimedeltaIndex) @@ -337,6 +338,21 @@ def test_series_retbins(self): CDT(ordered=True)) tm.assert_series_equal(result, expected) + def test_cut_duplicates_bin(self): + # issue 20947 + values = Series(np.array([1, 3, 5, 7, 9]), + index=["a", "b", "c", "d", "e"]) + bins = [0, 2, 4, 6, 10, 10] + result = cut(values, bins, duplicates='drop') + expected = cut(values, pd.unique(bins)) + tm.assert_series_equal(result, expected) + + pytest.raises(ValueError, cut, values, bins) + pytest.raises(ValueError, cut, values, bins, duplicates='raise') + + # invalid + pytest.raises(ValueError, cut, values, bins, duplicates='foo') + def test_qcut_duplicates_bin(self): # GH 7751 values = [0, 0, 0, 0, 1, 2, 3]
- [ ] if cut(x, bins=[0, 1, 2, 3, 4, 5, 6, 6, 8, 8, 10], labels=False, retbins=True, right=False) will raise ValueError: You can drop duplicate edges by setting the 'duplicates' kwarg, so add 'duplicates' parameters to the cut function.
https://api.github.com/repos/pandas-dev/pandas/pulls/20947
2018-05-04T02:48:21Z
2018-05-10T18:27:48Z
2018-05-10T18:27:48Z
2018-05-11T02:11:25Z
BUG: Fix IntervalIndex.get_loc/get_indexer for IntervalIndex of length one
diff --git a/doc/source/api.rst b/doc/source/api.rst index 93edd090d846b..d00e5511f1100 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1632,6 +1632,8 @@ IntervalIndex Components IntervalIndex.length IntervalIndex.values IntervalIndex.is_non_overlapping_monotonic + IntervalIndex.get_loc + IntervalIndex.get_indexer .. _api.multiindex: diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 4ad40fe0f7f2b..09ca4f403399b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1245,6 +1245,7 @@ Indexing - Bug in ``Series.is_unique`` where extraneous output in stderr is shown if Series contains objects with ``__ne__`` defined (:issue:`20661`) - Bug in ``.loc`` assignment with a single-element list-like incorrectly assigns as a list (:issue:`19474`) - Bug in partial string indexing on a ``Series/DataFrame`` with a monotonic decreasing ``DatetimeIndex`` (:issue:`19362`) +- Bug in :meth:`IntervalIndex.get_loc` and :meth:`IntervalIndex.get_indexer` when used with an :class:`IntervalIndex` containing a single interval (:issue:`17284`, :issue:`20921`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 54800d0d76d2e..766ac7b14120e 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -159,20 +159,22 @@ class IntervalIndex(IntervalMixin, Index): Attributes ---------- - left - right closed - mid + is_non_overlapping_monotonic + left length + mid + right values - is_non_overlapping_monotonic Methods ------- + contains from_arrays - from_tuples from_breaks - contains + from_tuples + get_indexer + get_loc Examples --------- @@ -938,8 +940,11 @@ def _searchsorted_monotonic(self, label, side, exclude_label=False): if isinstance(label, IntervalMixin): raise NotImplementedError + # GH 20921: "not is_monotonic_increasing" for the second condition + # instead of "is_monotonic_decreasing" to account for single element + # indexes 
being both increasing and decreasing if ((side == 'left' and self.left.is_monotonic_increasing) or - (side == 'right' and self.left.is_monotonic_decreasing)): + (side == 'right' and not self.left.is_monotonic_increasing)): sub_idx = self.right if self.open_right or exclude_label: label = _get_next_label(label) diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 71a6f78125004..9920809a18a24 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -497,6 +497,14 @@ def test_get_loc_interval(self): pytest.raises(KeyError, self.index.get_loc, Interval(-1, 0, 'left')) + # Make consistent with test_interval_new.py (see #16316, #16386) + @pytest.mark.parametrize('item', [3, Interval(1, 4)]) + def test_get_loc_length_one(self, item, closed): + # GH 20921 + index = IntervalIndex.from_tuples([(0, 5)], closed=closed) + result = index.get_loc(item) + assert result == 0 + # To be removed, replaced by test_interval_new.py (see #16316, #16386) def test_get_indexer(self): actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3]) @@ -544,6 +552,16 @@ def test_get_indexer_subintervals(self): expected = np.array([0, 0, 0], dtype='intp') tm.assert_numpy_array_equal(actual, expected) + # Make consistent with test_interval_new.py (see #16316, #16386) + @pytest.mark.parametrize('item', [ + [3], np.arange(1, 5), [Interval(1, 4)], interval_range(1, 4)]) + def test_get_indexer_length_one(self, item, closed): + # GH 17284 + index = IntervalIndex.from_tuples([(0, 5)], closed=closed) + result = index.get_indexer(item) + expected = np.array([0] * len(item), dtype='intp') + tm.assert_numpy_array_equal(result, expected) + # To be removed, replaced by test_interval_new.py (see #16316, #16386) def test_contains(self): # Only endpoints are valid.
- [X] closes #17284 - [X] closes #20921 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20946
2018-05-04T00:25:09Z
2018-05-05T12:53:37Z
2018-05-05T12:53:36Z
2018-05-05T19:17:56Z
Update link to NumPy Docstring Standard explanation
diff --git a/doc/README.rst b/doc/README.rst index efa21fdd3a2d9..12950d323f5d3 100644 --- a/doc/README.rst +++ b/doc/README.rst @@ -42,7 +42,7 @@ Some other important things to know about the docs: - The docstrings follow the **Numpy Docstring Standard** which is used widely in the Scientific Python community. This standard specifies the format of the different sections of the docstring. See `this document - <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_ + <https://numpydoc.readthedocs.io/en/latest/>`_ for a detailed explanation, or look at some of the existing functions to extend it in a similar manner.
- [ ] whatsnew Updated link to NumPy DocString from `https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt` to `https://numpydoc.readthedocs.io/en/latest/`
https://api.github.com/repos/pandas-dev/pandas/pulls/20942
2018-05-03T10:19:32Z
2018-05-03T10:22:30Z
2018-05-03T10:22:30Z
2018-05-03T10:22:33Z
DOC: followup to #20583, observed kwarg for .groupby
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 3616a7e1b41d2..da13a34cccfea 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -994,7 +994,7 @@ is only interesting over one column (here ``colname``), it may be filtered Handling of (un)observed Categorical values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When using a ``Categorical`` grouper (as a single or as part of multipler groupers), the ``observed`` keyword +When using a ``Categorical`` grouper (as a single grouper, or as part of multipler groupers), the ``observed`` keyword controls whether to return a cartesian product of all possible groupers values (``observed=False``) or only those that are observed groupers (``observed=True``). diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 4ad40fe0f7f2b..8f1ec0c108ee1 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -419,9 +419,11 @@ documentation. If you build an extension array, publicize it on our Categorical Groupers has gained an observed keyword ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In previous versions, grouping by 1 or more categorical columns would result in an index that was the cartesian product of all of the categories for -each grouper, not just the observed values.``.groupby()`` has gained the ``observed`` keyword to toggle this behavior. The default remains backward -compatible (generate a cartesian product). (:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`) +Grouping by a categorical includes the unobserved categories in the output. +When grouping with multiple groupers, this means you get the cartesian product of all the +categories, including combinations where there are no observations, which can result in a large +number of groupers. We have added a keyword ``observed`` to control this behavior, it defaults to +``observed=False`` for backward-compatiblity. 
(:issue:`14942`, :issue:`8138`, :issue:`15217`, :issue:`17594`, :issue:`8669`, :issue:`20583`, :issue:`20902`) .. ipython:: python diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e96a2a9f08520..343f36eabc0d7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6584,7 +6584,7 @@ def clip_lower(self, threshold, axis=None, inplace=False): axis=axis, inplace=inplace) def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, - group_keys=True, squeeze=False, observed=None, **kwargs): + group_keys=True, squeeze=False, observed=False, **kwargs): """ Group series using mapper (dict or key function, apply given function to group, return result as series) or by a series of columns. @@ -6617,11 +6617,10 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, squeeze : boolean, default False reduce the dimensionality of the return type if possible, otherwise return a consistent type - observed : boolean, default None - if True: only show observed values for categorical groupers. - if False: show all values for categorical groupers. - if None: if any categorical groupers, show a FutureWarning, - default to False. + observed : boolean, default False + This only applies if any of the groupers are Categoricals + If True: only show observed values for categorical groupers. + If False: show all values for categorical groupers. .. 
versionadded:: 0.23.0 diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index f78f7cb625218..164d1bebd2929 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -556,7 +556,7 @@ class _GroupBy(PandasObject, SelectionMixin): def __init__(self, obj, keys=None, axis=0, level=None, grouper=None, exclusions=None, selection=None, as_index=True, sort=True, group_keys=True, squeeze=False, - observed=None, **kwargs): + observed=False, **kwargs): self._selection = selection @@ -2907,7 +2907,7 @@ class Grouping(object): """ def __init__(self, index, grouper=None, obj=None, name=None, level=None, - sort=True, observed=None, in_axis=False): + sort=True, observed=False, in_axis=False): self.name = name self.level = level @@ -2964,12 +2964,6 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, # a passed Categorical elif is_categorical_dtype(self.grouper): - # observed can be True/False/None - # we treat None as False. If in the future - # we need to warn if observed is not passed - # then we have this option - # gh-20583 - self.all_grouper = self.grouper self.grouper = self.grouper._codes_for_groupby( self.sort, observed) @@ -3088,7 +3082,7 @@ def groups(self): def _get_grouper(obj, key=None, axis=0, level=None, sort=True, - observed=None, mutated=False, validate=True): + observed=False, mutated=False, validate=True): """ create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. @@ -4734,26 +4728,28 @@ def _wrap_agged_blocks(self, items, blocks): def _reindex_output(self, result): """ - if we have categorical groupers, then we want to make sure that + If we have categorical groupers, then we want to make sure that we have a fully reindex-output to the levels. These may have not participated in the groupings (e.g. 
may have all been - nan groups) + nan groups); This can re-expand the output space """ - # TODO(jreback): remove completely - # when observed parameter is defaulted to True - # gh-20583 - - if self.observed: - return result - + # we need to re-expand the output space to accomodate all values + # whether observed or not in the cartesian product of our groupes groupings = self.grouper.groupings if groupings is None: return result elif len(groupings) == 1: return result + + # if we only care about the observed values + # we are done + elif self.observed: + return result + + # reindexing only applies to a Categorical grouper elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex)) for ping in groupings): return result
closes #20902
https://api.github.com/repos/pandas-dev/pandas/pulls/20941
2018-05-03T10:16:31Z
2018-05-05T12:44:14Z
2018-05-05T12:44:14Z
2018-05-05T12:44:44Z
BUG: cant modify df with duplicate index (#17105)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index e3b4eb5e22dec..c1f588b0072fd 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1253,6 +1253,7 @@ Indexing - Bug in ``Series.is_unique`` where extraneous output in stderr is shown if Series contains objects with ``__ne__`` defined (:issue:`20661`) - Bug in ``.loc`` assignment with a single-element list-like incorrectly assigns as a list (:issue:`19474`) - Bug in partial string indexing on a ``Series/DataFrame`` with a monotonic decreasing ``DatetimeIndex`` (:issue:`19362`) +- Bug in performing in-place operations on a ``DataFrame`` with a duplicate ``Index`` (:issue:`17105`) - Bug in :meth:`IntervalIndex.get_loc` and :meth:`IntervalIndex.get_indexer` when used with an :class:`IntervalIndex` containing a single interval (:issue:`17284`, :issue:`20921`) MultiIndex diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 7a7e47803c240..858d08d73e603 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1318,7 +1318,7 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False): (indexer, missing) = labels.get_indexer_non_unique(objarr) # 'indexer' has dupes, create 'check' using 'missing' - check = np.zeros_like(objarr) + check = np.zeros(len(objarr)) check[missing] = -1 mask = check == -1 diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index b887b1c9f1218..6d74ce54faa94 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -1929,6 +1929,32 @@ def test_iloc_duplicates(self): expected = df.take([0], axis=1) assert_frame_equal(result, expected) + def test_loc_duplicates(self): + # gh-17105 + + # insert a duplicate element to the index + trange = pd.date_range(start=pd.Timestamp(year=2017, month=1, day=1), + end=pd.Timestamp(year=2017, month=1, day=5)) + + trange = trange.insert(loc=5, + item=pd.Timestamp(year=2017, month=1, day=5)) + + df = 
pd.DataFrame(0, index=trange, columns=["A", "B"]) + bool_idx = np.array([False, False, False, False, False, True]) + + # assignment + df.loc[trange[bool_idx], "A"] = 6 + + expected = pd.DataFrame({'A': [0, 0, 0, 0, 6, 6], + 'B': [0, 0, 0, 0, 0, 0]}, + index=trange) + tm.assert_frame_equal(df, expected) + + # in-place + df = pd.DataFrame(0, index=trange, columns=["A", "B"]) + df.loc[trange[bool_idx], "A"] += 6 + tm.assert_frame_equal(df, expected) + def test_iloc_sparse_propegate_fill_value(self): from pandas.core.sparse.api import SparseDataFrame df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
- [x] closes #17105 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Fixing to allow the modification of DataFrames that have duplicate elements in the index. Previously it would fail with ``` AttributeError: 'bool' object has no attribute 'any' ``` See #17105 for a code snippet. Replacing `zeros_like(objarray)` with `zeros()` because the first unnecessarily returns an array of zeros with the same types as `objarray`. We only want the zeros, not the type, to be able to later compare against -1 and get an array as a result: The result of `zeros_like()` with dates gives a boolean after comparison ``` >>> myarr_fromindex = np.zeros_like(pd.DatetimeIndex([2,3])) >>> myarr_fromindex array(['1970-01-01T00:00:00.000000000', '1970-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> >>> type(myarr_fromindex) <type 'numpy.ndarray'> >>> >>> myarr_fromindex == -1 False ``` The result of `zeros_like()` with numbers gives an array after comparison ``` >>> >>> >>> myarr_fromarr = np.zeros_like([2,3]) >>> myarr_fromarr array([0, 0]) >>> type(myarr_fromarr) <type 'numpy.ndarray'> >>> myarr_fromarr == -1 array([False, False]) >>> ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20939
2018-05-03T08:28:15Z
2018-05-08T00:23:34Z
2018-05-08T00:23:34Z
2018-05-08T00:23:41Z