title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
Backport PR #27814 on branch 0.25.x (BUG: Series.rename raises error on values accepted by Series construc…)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 680d69a9862cd..b307fae4fbdc1 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -108,6 +108,7 @@ Other ^^^^^ - Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when replacing timezone-aware timestamps using a dict-like replacer (:issue:`27720`) +- Bug in :meth:`Series.rename` when using a custom type indexer. Now any value that isn't callable or dict-like is treated as a scalar. (:issue:`27814`) .. _whatsnew_0.251.contributors: diff --git a/pandas/core/series.py b/pandas/core/series.py index 42afb3537c5d8..9f31e185fe41a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4207,12 +4207,10 @@ def rename(self, index=None, **kwargs): """ kwargs["inplace"] = validate_bool_kwarg(kwargs.get("inplace", False), "inplace") - non_mapping = is_scalar(index) or ( - is_list_like(index) and not is_dict_like(index) - ) - if non_mapping: + if callable(index) or is_dict_like(index): + return super().rename(index=index, **kwargs) + else: return self._set_name(index, inplace=kwargs.get("inplace")) - return super().rename(index=index, **kwargs) @Substitution(**_shared_doc_kwargs) @Appender(generic.NDFrame.reindex.__doc__) diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 11add8d61deeb..eeb95bc52bf78 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -267,6 +267,25 @@ def test_rename_axis_none(self, kwargs): expected = Series([1, 2, 3], index=expected_index) tm.assert_series_equal(result, expected) + def test_rename_with_custom_indexer(self): + # GH 27814 + class MyIndexer: + pass + + ix = MyIndexer() + s = Series([1, 2, 3]).rename(ix) + assert s.name is ix + + def test_rename_with_custom_indexer_inplace(self): + # GH 27814 + class MyIndexer: + pass + + ix = MyIndexer() + s = Series([1, 2, 3]) + s.rename(ix, inplace=True) + assert s.name is ix + 
def test_set_axis_inplace_axes(self, axis_series): # GH14636 ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64")
Backport PR #27814: BUG: Series.rename raises error on values accepted by Series construc…
https://api.github.com/repos/pandas-dev/pandas/pulls/28087
2019-08-22T13:07:58Z
2019-08-22T13:47:33Z
2019-08-22T13:47:33Z
2019-08-22T13:47:33Z
Backport PR #27827 on branch 0.25.x (BUG: Fixed groupby quantile for listlike q)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index a7b97b41fb776..050e1939875dc 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -122,6 +122,7 @@ Plotting Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ +- Fixed regression in :meth:`pands.core.groupby.DataFrameGroupBy.quantile` raising when multiple quantiles are given (:issue:`27526`) - Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`) - Bug in :meth:`pandas.core.groupby.GroupBy.nth` where ``observed=False`` was being ignored for Categorical groupers (:issue:`26385`) - Bug in windowing over read-only arrays (:issue:`27766`) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index b852513e454a2..41b5e0d803659 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1872,6 +1872,7 @@ def quantile(self, q=0.5, interpolation="linear"): a 2.0 b 3.0 """ + from pandas import concat def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): @@ -1899,18 +1900,57 @@ def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray: return vals - return self._get_cythonized_result( - "group_quantile", - self.grouper, - aggregate=True, - needs_values=True, - needs_mask=True, - cython_dtype=np.float64, - pre_processing=pre_processor, - post_processing=post_processor, - q=q, - interpolation=interpolation, - ) + if is_scalar(q): + return self._get_cythonized_result( + "group_quantile", + self.grouper, + aggregate=True, + needs_values=True, + needs_mask=True, + cython_dtype=np.float64, + pre_processing=pre_processor, + post_processing=post_processor, + q=q, + interpolation=interpolation, + ) + else: + results = [ + self._get_cythonized_result( + "group_quantile", + self.grouper, + aggregate=True, + needs_values=True, + needs_mask=True, + 
cython_dtype=np.float64, + pre_processing=pre_processor, + post_processing=post_processor, + q=qi, + interpolation=interpolation, + ) + for qi in q + ] + result = concat(results, axis=0, keys=q) + # fix levels to place quantiles on the inside + # TODO(GH-10710): Ideally, we could write this as + # >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :] + # but this hits https://github.com/pandas-dev/pandas/issues/10710 + # which doesn't reorder the list-like `q` on the inner level. + order = np.roll(list(range(result.index.nlevels)), -1) + result = result.reorder_levels(order) + result = result.reindex(q, level=-1) + + # fix order. + hi = len(q) * self.ngroups + arr = np.arange(0, hi, self.ngroups) + arrays = [] + + for i in range(self.ngroups): + arr = arr + i + arrays.append(arr) + + indices = np.concatenate(arrays) + assert len(indices) == len(result) + return result.take(indices) @Substitution(name="groupby") def ngroup(self, ascending=True): diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index efc3142b25b82..397c6653ce967 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1238,6 +1238,57 @@ def test_quantile(interpolation, a_vals, b_vals, q): tm.assert_frame_equal(result, expected) +def test_quantile_array(): + # https://github.com/pandas-dev/pandas/issues/27526 + df = pd.DataFrame({"A": [0, 1, 2, 3, 4]}) + result = df.groupby([0, 0, 1, 1, 1]).quantile([0.25]) + + index = pd.MultiIndex.from_product([[0, 1], [0.25]]) + expected = pd.DataFrame({"A": [0.25, 2.50]}, index=index) + tm.assert_frame_equal(result, expected) + + df = pd.DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]}) + index = pd.MultiIndex.from_product([[0, 1], [0.25, 0.75]]) + + result = df.groupby([0, 0, 1, 1]).quantile([0.25, 0.75]) + expected = pd.DataFrame( + {"A": [0.25, 0.75, 2.25, 2.75], "B": [4.25, 4.75, 6.25, 6.75]}, index=index + ) + tm.assert_frame_equal(result, expected) + + +def 
test_quantile_array_no_sort(): + df = pd.DataFrame({"A": [0, 1, 2], "B": [3, 4, 5]}) + result = df.groupby([1, 0, 1], sort=False).quantile([0.25, 0.5, 0.75]) + expected = pd.DataFrame( + {"A": [0.5, 1.0, 1.5, 1.0, 1.0, 1.0], "B": [3.5, 4.0, 4.5, 4.0, 4.0, 4.0]}, + index=pd.MultiIndex.from_product([[1, 0], [0.25, 0.5, 0.75]]), + ) + tm.assert_frame_equal(result, expected) + + result = df.groupby([1, 0, 1], sort=False).quantile([0.75, 0.25]) + expected = pd.DataFrame( + {"A": [1.5, 0.5, 1.0, 1.0], "B": [4.5, 3.5, 4.0, 4.0]}, + index=pd.MultiIndex.from_product([[1, 0], [0.75, 0.25]]), + ) + tm.assert_frame_equal(result, expected) + + +def test_quantile_array_multiple_levels(): + df = pd.DataFrame( + {"A": [0, 1, 2], "B": [3, 4, 5], "c": ["a", "a", "a"], "d": ["a", "a", "b"]} + ) + result = df.groupby(["c", "d"]).quantile([0.25, 0.75]) + index = pd.MultiIndex.from_tuples( + [("a", "a", 0.25), ("a", "a", 0.75), ("a", "b", 0.25), ("a", "b", 0.75)], + names=["c", "d", None], + ) + expected = pd.DataFrame( + {"A": [0.25, 0.75, 2.0, 2.0], "B": [3.25, 3.75, 5.0, 5.0]}, index=index + ) + tm.assert_frame_equal(result, expected) + + def test_quantile_raises(): df = pd.DataFrame( [["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"]
Backport PR #27827: BUG: Fixed groupby quantile for listlike q
https://api.github.com/repos/pandas-dev/pandas/pulls/28085
2019-08-22T11:29:00Z
2019-08-22T12:17:29Z
2019-08-22T12:17:28Z
2019-08-23T13:30:32Z
DOC: Remove alias for numpy.random.randn from the docs
diff --git a/doc/source/conf.py b/doc/source/conf.py index 3ebc5d8b6333b..a4b7d97c2cf5e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -315,7 +315,6 @@ import numpy as np import pandas as pd - randn = np.random.randn np.random.seed(123456) np.set_printoptions(precision=4, suppress=True) pd.options.display.max_rows = 15 diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst index 59ea6b9776232..2e0442364b2f3 100644 --- a/doc/source/whatsnew/v0.10.0.rst +++ b/doc/source/whatsnew/v0.10.0.rst @@ -498,7 +498,7 @@ Here is a taste of what to expect. .. code-block:: ipython - In [58]: p4d = Panel4D(randn(2, 2, 5, 4), + In [58]: p4d = Panel4D(np.random.randn(2, 2, 5, 4), ....: labels=['Label1','Label2'], ....: items=['Item1', 'Item2'], ....: major_axis=date_range('1/1/2000', periods=5),
Based on this comment: https://github.com/pandas-dev/pandas/issues/28038#issuecomment-523439376 Supersedes #28068
https://api.github.com/repos/pandas-dev/pandas/pulls/28082
2019-08-22T03:41:27Z
2019-08-23T09:01:29Z
2019-08-23T09:01:29Z
2019-08-23T09:01:35Z
REF: separate bloated test
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 7e03b9544ee72..80661cc214fd6 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -791,28 +791,33 @@ def wrapper(self, other): self, other = _align_method_SERIES(self, other, align_asobject=True) res_name = get_op_result_name(self, other) + # TODO: shouldn't we be applying finalize whenever + # not isinstance(other, ABCSeries)? + finalizer = ( + lambda x: x.__finalize__(self) + if not isinstance(other, (ABCSeries, ABCIndexClass)) + else x + ) + if isinstance(other, ABCDataFrame): # Defer to DataFrame implementation; fail early return NotImplemented elif isinstance(other, (ABCSeries, ABCIndexClass)): is_other_int_dtype = is_integer_dtype(other.dtype) - other = fill_int(other) if is_other_int_dtype else fill_bool(other) - - ovalues = other.values - finalizer = lambda x: x + other = other if is_other_int_dtype else fill_bool(other) else: # scalars, list, tuple, np.array - is_other_int_dtype = is_integer_dtype(np.asarray(other)) + is_other_int_dtype = is_integer_dtype(np.asarray(other).dtype) if is_list_like(other) and not isinstance(other, np.ndarray): # TODO: Can we do this before the is_integer_dtype check? # could the is_integer_dtype check be checking the wrong # thing? e.g. other = [[0, 1], [2, 3], [4, 5]]? other = construct_1d_object_array_from_listlike(other) - ovalues = other - finalizer = lambda x: x.__finalize__(self) + # TODO: use extract_array once we handle EA correctly, see GH#27959 + ovalues = lib.values_from_object(other) # For int vs int `^`, `|`, `&` are bitwise operators and return # integer dtypes. 
Otherwise these are boolean ops diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 062c07cb6242a..aa44760dcd918 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -36,22 +36,14 @@ def test_bool_operators_with_nas(self, bool_op): expected[mask] = False assert_series_equal(result, expected) - def test_operators_bitwise(self): + def test_logical_operators_bool_dtype_with_empty(self): # GH#9016: support bitwise op for integer types index = list("bca") s_tft = Series([True, False, True], index=index) s_fff = Series([False, False, False], index=index) - s_tff = Series([True, False, False], index=index) s_empty = Series([]) - # TODO: unused - # s_0101 = Series([0, 1, 0, 1]) - - s_0123 = Series(range(4), dtype="int64") - s_3333 = Series([3] * 4) - s_4444 = Series([4] * 4) - res = s_tft & s_empty expected = s_fff assert_series_equal(res, expected) @@ -60,6 +52,16 @@ def test_operators_bitwise(self): expected = s_tft assert_series_equal(res, expected) + def test_logical_operators_int_dtype_with_int_dtype(self): + # GH#9016: support bitwise op for integer types + + # TODO: unused + # s_0101 = Series([0, 1, 0, 1]) + + s_0123 = Series(range(4), dtype="int64") + s_3333 = Series([3] * 4) + s_4444 = Series([4] * 4) + res = s_0123 & s_3333 expected = Series(range(4), dtype="int64") assert_series_equal(res, expected) @@ -68,76 +70,129 @@ def test_operators_bitwise(self): expected = Series(range(4, 8), dtype="int64") assert_series_equal(res, expected) - s_a0b1c0 = Series([1], list("b")) - - res = s_tft & s_a0b1c0 - expected = s_tff.reindex(list("abc")) + s_1111 = Series([1] * 4, dtype="int8") + res = s_0123 & s_1111 + expected = Series([0, 1, 0, 1], dtype="int64") assert_series_equal(res, expected) - res = s_tft | s_a0b1c0 - expected = s_tft.reindex(list("abc")) + res = s_0123.astype(np.int16) | s_1111.astype(np.int32) + expected = Series([1, 1, 3, 3], dtype="int32") assert_series_equal(res, 
expected) - n0 = 0 - res = s_tft & n0 - expected = s_fff - assert_series_equal(res, expected) + def test_logical_operators_int_dtype_with_int_scalar(self): + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype="int64") - res = s_0123 & n0 + res = s_0123 & 0 expected = Series([0] * 4) assert_series_equal(res, expected) - n1 = 1 - res = s_tft & n1 - expected = s_tft - assert_series_equal(res, expected) - - res = s_0123 & n1 + res = s_0123 & 1 expected = Series([0, 1, 0, 1]) assert_series_equal(res, expected) - s_1111 = Series([1] * 4, dtype="int8") - res = s_0123 & s_1111 - expected = Series([0, 1, 0, 1], dtype="int64") - assert_series_equal(res, expected) - - res = s_0123.astype(np.int16) | s_1111.astype(np.int32) - expected = Series([1, 1, 3, 3], dtype="int32") - assert_series_equal(res, expected) + def test_logical_operators_int_dtype_with_float(self): + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype="int64") - with pytest.raises(TypeError): - s_1111 & "a" - with pytest.raises(TypeError): - s_1111 & ["a", "b", "c", "d"] with pytest.raises(TypeError): s_0123 & np.NaN with pytest.raises(TypeError): s_0123 & 3.14 with pytest.raises(TypeError): s_0123 & [0.1, 4, 3.14, 2] + with pytest.raises(TypeError): + s_0123 & np.array([0.1, 4, 3.14, 2]) - # s_0123 will be all false now because of reindexing like s_tft - exp = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"]) - assert_series_equal(s_tft & s_0123, exp) - - # s_tft will be all false now because of reindexing like s_0123 - exp = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"]) - assert_series_equal(s_0123 & s_tft, exp) - - assert_series_equal(s_0123 & False, Series([False] * 4)) - assert_series_equal(s_0123 ^ False, Series([False, True, True, True])) - assert_series_equal(s_0123 & [False], Series([False] * 4)) - assert_series_equal(s_0123 & (False), Series([False] * 4)) - assert_series_equal( - s_0123 & Series([False, np.NaN, False, False]), 
Series([False] * 4) - ) + # FIXME: this should be consistent with the list case above + expected = Series([False, True, False, True]) + result = s_0123 & Series([0.1, 4, -3.14, 2]) + assert_series_equal(result, expected) + + def test_logical_operators_int_dtype_with_str(self): + s_1111 = Series([1] * 4, dtype="int8") + + with pytest.raises(TypeError): + s_1111 & "a" + with pytest.raises(TypeError): + s_1111 & ["a", "b", "c", "d"] + + def test_logical_operators_int_dtype_with_bool(self): + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype="int64") + + expected = Series([False] * 4) + + result = s_0123 & False + assert_series_equal(result, expected) + + result = s_0123 & [False] + assert_series_equal(result, expected) + + result = s_0123 & (False,) + assert_series_equal(result, expected) - s_ftft = Series([False, True, False, True]) - assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft) + result = s_0123 ^ False + expected = Series([False, True, True, True]) + assert_series_equal(result, expected) + + def test_logical_operators_int_dtype_with_object(self): + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype="int64") + + result = s_0123 & Series([False, np.NaN, False, False]) + expected = Series([False] * 4) + assert_series_equal(result, expected) s_abNd = Series(["a", "b", np.NaN, "d"]) - res = s_0123 & s_abNd - expected = s_ftft + result = s_0123 & s_abNd + expected = Series([False, True, False, True]) + assert_series_equal(result, expected) + + def test_logical_operators_bool_dtype_with_int(self): + index = list("bca") + + s_tft = Series([True, False, True], index=index) + s_fff = Series([False, False, False], index=index) + + res = s_tft & 0 + expected = s_fff + assert_series_equal(res, expected) + + res = s_tft & 1 + expected = s_tft + assert_series_equal(res, expected) + + def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self): + # GH#9016: support bitwise op for integer 
types + + # with non-matching indexes, logical operators will cast to object + # before operating + index = list("bca") + + s_tft = Series([True, False, True], index=index) + s_tft = Series([True, False, True], index=index) + s_tff = Series([True, False, False], index=index) + + s_0123 = Series(range(4), dtype="int64") + + # s_0123 will be all false now because of reindexing like s_tft + expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"]) + result = s_tft & s_0123 + assert_series_equal(result, expected) + + expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"]) + result = s_0123 & s_tft + assert_series_equal(result, expected) + + s_a0b1c0 = Series([1], list("b")) + + res = s_tft & s_a0b1c0 + expected = s_tff.reindex(list("abc")) + assert_series_equal(res, expected) + + res = s_tft | s_a0b1c0 + expected = s_tft.reindex(list("abc")) assert_series_equal(res, expected) def test_scalar_na_logical_ops_corners(self): @@ -523,6 +578,7 @@ def test_comparison_operators_with_nas(self): assert_series_equal(result, expected) + # FIXME: dont leave commented-out # fffffffuuuuuuuuuuuu # result = f(val, s) # expected = f(val, s.dropna()).reindex(s.index)
Working on logical ops I'm finding a bunch of internal inconsistencies that will need to be addressed. Since those are liable to have large diffs, this starts off with refactor that leaves behavior unchanged: simplify the method itself, and split a giant test into well-scoped ones.
https://api.github.com/repos/pandas-dev/pandas/pulls/28081
2019-08-22T03:09:37Z
2019-09-02T21:06:27Z
2019-09-02T21:06:27Z
2019-09-02T22:56:03Z
Auto backport of pr 28074 on 0.25.x
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index a7b97b41fb776..eb54961309e8e 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -1,57 +1,44 @@ .. _whatsnew_0251: -What's new in 0.25.1 (July XX, 2019) ------------------------------------- +What's new in 0.25.1 (August 21, 2019) +-------------------------------------- -Enhancements -~~~~~~~~~~~~ - - -.. _whatsnew_0251.enhancements.other: +These are the changes in pandas 0.25.1. See :ref:`release` for a full changelog +including other versions of pandas. -Other enhancements -^^^^^^^^^^^^^^^^^^ +I/O and LZMA +~~~~~~~~~~~~ -- -- -- +Some users may unknowingly have an incomplete Python installation lacking the `lzma` module from the standard library. In this case, `import pandas` failed due to an `ImportError` (:issue: `27575`). +Pandas will now warn, rather than raising an `ImportError` if the `lzma` module is not present. Any subsequent attempt to use `lzma` methods will raise a `RuntimeError`. +A possible fix for the lack of the `lzma` module is to ensure you have the necessary libraries and then re-install Python. +For example, on MacOS installing Python with `pyenv` may lead to an incomplete Python installation due to unmet system dependencies at compilation time (like `xz`). Compilation will succeed, but Python might fail at run time. The issue can be solved by installing the necessary dependencies and then re-installing Python. .. 
_whatsnew_0251.bug_fixes: Bug fixes ~~~~~~~~~ - Categorical ^^^^^^^^^^^ -- Bug in :meth:`Categorical.fillna` would replace all values, not just those that are ``NaN`` (:issue:`26215`) -- -- +- Bug in :meth:`Categorical.fillna` that would replace all values, not just those that are ``NaN`` (:issue:`26215`) Datetimelike ^^^^^^^^^^^^ + - Bug in :func:`to_datetime` where passing a timezone-naive :class:`DatetimeArray` or :class:`DatetimeIndex` and ``utc=True`` would incorrectly return a timezone-naive result (:issue:`27733`) - Bug in :meth:`Period.to_timestamp` where a :class:`Period` outside the :class:`Timestamp` implementation bounds (roughly 1677-09-21 to 2262-04-11) would return an incorrect :class:`Timestamp` instead of raising ``OutOfBoundsDatetime`` (:issue:`19643`) -- -- - -Timedelta -^^^^^^^^^ - -- -- -- +- Bug in iterating over :class:`DatetimeIndex` when the underlying data is read-only (:issue:`28055`) Timezones ^^^^^^^^^ - Bug in :class:`Index` where a numpy object array with a timezone aware :class:`Timestamp` and ``np.nan`` would not return a :class:`DatetimeIndex` (:issue:`27011`) -- -- Numeric ^^^^^^^ + - Bug in :meth:`Series.interpolate` when using a timezone aware :class:`DatetimeIndex` (:issue:`27548`) - Bug when printing negative floating point complex numbers would raise an ``IndexError`` (:issue:`27484`) - Bug where :class:`DataFrame` arithmetic operators such as :meth:`DataFrame.mul` with a :class:`Series` with axis=1 would raise an ``AttributeError`` on :class:`DataFrame` larger than the minimum threshold to invoke numexpr (:issue:`27636`) @@ -61,23 +48,11 @@ Conversion ^^^^^^^^^^ - Improved the warnings for the deprecated methods :meth:`Series.real` and :meth:`Series.imag` (:issue:`27610`) -- -- - -Strings -^^^^^^^ - -- -- -- - Interval ^^^^^^^^ + - Bug in :class:`IntervalIndex` where `dir(obj)` would raise ``ValueError`` (:issue:`27571`) -- -- -- Indexing ^^^^^^^^ @@ -86,38 +61,26 @@ Indexing - Break reference cycle involving :class:`Index` 
and other index classes to allow garbage collection of index objects without running the GC. (:issue:`27585`, :issue:`27840`) - Fix regression in assigning values to a single column of a DataFrame with a ``MultiIndex`` columns (:issue:`27841`). - Fix regression in ``.ix`` fallback with an ``IntervalIndex`` (:issue:`27865`). -- Missing ^^^^^^^ -- Bug in :func:`pandas.isnull` or :func:`pandas.isna` when the input is a type e.g. `type(pandas.Series())` (:issue:`27482`) -- -- - -MultiIndex -^^^^^^^^^^ - -- -- -- +- Bug in :func:`pandas.isnull` or :func:`pandas.isna` when the input is a type e.g. ``type(pandas.Series())`` (:issue:`27482`) I/O ^^^ + - Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`) - Better error message when a negative header is passed in :func:`pandas.read_csv` (:issue:`27779`) -- Follow the ``min_rows`` display option (introduced in v0.25.0) correctly in the html repr in the notebook (:issue:`27991`). -- +- Follow the ``min_rows`` display option (introduced in v0.25.0) correctly in the HTML repr in the notebook (:issue:`27991`). Plotting ^^^^^^^^ -- Added a pandas_plotting_backends entrypoint group for registering plot backends. See :ref:`extending.plotting-backends` for more (:issue:`26747`). +- Added a ``pandas_plotting_backends`` entrypoint group for registering plot backends. See :ref:`extending.plotting-backends` for more (:issue:`26747`). - Fixed the re-instatement of Matplotlib datetime converters after calling - `pandas.plotting.deregister_matplotlib_converters()` (:issue:`27481`). -- + :meth:`pandas.plotting.deregister_matplotlib_converters` (:issue:`27481`). - Fix compatibility issue with matplotlib when passing a pandas ``Index`` to a plot call (:issue:`27775`). 
-- Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -125,8 +88,7 @@ Groupby/resample/rolling - Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`) - Bug in :meth:`pandas.core.groupby.GroupBy.nth` where ``observed=False`` was being ignored for Categorical groupers (:issue:`26385`) - Bug in windowing over read-only arrays (:issue:`27766`) -- -- +- Fixed segfault in `pandas.core.groupby.DataFrameGroupBy.quantile` when an invalid quantile was passed (:issue:`27470`) Reshaping ^^^^^^^^^ @@ -139,39 +101,12 @@ Reshaping Sparse ^^^^^^ -- -- -- - - -Build Changes -^^^^^^^^^^^^^ - -- -- -- - -ExtensionArray -^^^^^^^^^^^^^^ - -- -- -- +- Bug in reductions for :class:`Series` with Sparse dtypes (:issue:`27080`) Other ^^^^^ -- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when replacing timezone-aware timestamps using a dict-like replacer (:issue:`27720`) -- -- -- - -I/O and LZMA -~~~~~~~~~~~~ -Some users may unknowingly have an incomplete Python installation, which lacks the `lzma` module from the standard library. In this case, `import pandas` failed due to an `ImportError` (:issue: `27575`). -Pandas will now warn, rather than raising an `ImportError` if the `lzma` module is not present. Any subsequent attempt to use `lzma` methods will raise a `RuntimeError`. -A possible fix for the lack of the `lzma` module is to ensure you have the necessary libraries and then re-install Python. -For example, on MacOS installing Python with `pyenv` may lead to an incomplete Python installation due to unmet system dependencies at compilation time (like `xz`). Compilation will succeed, but Python might fail at run time. The issue can be solved by installing the necessary dependencies and then re-installing Python. +- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when replacing timezone-aware timestamps using a dict-like replacer (:issue:`27720`) .. 
_whatsnew_0.251.contributors: diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 4e49f660f5e19..01e500a80dcc4 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -71,7 +71,7 @@ cdef inline object create_time_from_ts( @cython.wraparound(False) @cython.boundscheck(False) -def ints_to_pydatetime(int64_t[:] arr, object tz=None, object freq=None, +def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None, str box="datetime"): """ Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index 4ea32359b8d4a..ab3107a0798e5 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -377,3 +377,11 @@ def test_nanosecond_field(self): dti = DatetimeIndex(np.arange(10)) tm.assert_index_equal(dti.nanosecond, pd.Index(np.arange(10, dtype=np.int64))) + + +def test_iter_readonly(): + # GH#28055 ints_to_pydatetime with readonly array + arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")]) + arr.setflags(write=False) + dti = pd.to_datetime(arr) + list(dti)
#28073 and #28074
https://api.github.com/repos/pandas-dev/pandas/pulls/28078
2019-08-21T21:08:37Z
2019-08-22T11:30:24Z
2019-08-22T11:30:24Z
2019-08-22T11:30:28Z
Backport PR #28072 on branch 0.25.x (TST: non-strict xfail for period test)
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index b57b817461788..04b92e263d645 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -1581,7 +1581,11 @@ def test_period_immutable(): @pytest.mark.xfail( - PY35, reason="Parsing as Period('0007-01-01', 'D') for reasons unknown", strict=True + # xpassing on MacPython with strict=False + # https://travis-ci.org/MacPython/pandas-wheels/jobs/574706922 + PY35, + reason="Parsing as Period('0007-01-01', 'D') for reasons unknown", + strict=False, ) def test_small_year_parsing(): per1 = Period("0001-01-07", "D")
Backport PR #28072: TST: non-strict xfail for period test
https://api.github.com/repos/pandas-dev/pandas/pulls/28077
2019-08-21T20:53:53Z
2019-08-22T11:29:21Z
2019-08-22T11:29:21Z
2019-08-22T11:29:21Z
Backport PR #28065: CI: disable codecov
diff --git a/ci/run_tests.sh b/ci/run_tests.sh index ee46da9f52eab..27d3fcb4cf563 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -50,9 +50,10 @@ do # if no tests are found (the case of "single and slow"), pytest exits with code 5, and would make the script fail, if not for the below code sh -c "$PYTEST_CMD; ret=\$?; [ \$ret = 5 ] && exit 0 || exit \$ret" - if [[ "$COVERAGE" && $? == 0 ]]; then - echo "uploading coverage for $TYPE tests" - echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME" - bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME - fi + # 2019-08-21 disabling because this is hitting HTTP 400 errors GH#27602 + # if [[ "$COVERAGE" && $? == 0 && "$TRAVIS_BRANCH" == "master" ]]; then + # echo "uploading coverage for $TYPE tests" + # echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME" + # bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME + # fi done
https://api.github.com/repos/pandas-dev/pandas/pulls/28076
2019-08-21T20:51:21Z
2019-08-22T11:31:22Z
2019-08-22T11:31:22Z
2019-08-22T11:31:26Z
DOC: redirect from_csv search
diff --git a/doc/redirects.csv b/doc/redirects.csv index a7886779c97d5..975fefead67a5 100644 --- a/doc/redirects.csv +++ b/doc/redirects.csv @@ -1579,3 +1579,6 @@ generated/pandas.unique,../reference/api/pandas.unique generated/pandas.util.hash_array,../reference/api/pandas.util.hash_array generated/pandas.util.hash_pandas_object,../reference/api/pandas.util.hash_pandas_object generated/pandas.wide_to_long,../reference/api/pandas.wide_to_long + +# Cached searches +reference/api/pandas.DataFrame.from_csv,pandas.read_csv
A web search for "DataFrame from CSV" leads to https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.from_csv.html which is currently a 404, since DataFrame.from_csv was removed. Redirect it to read_csv. Normally we shouldn't worry about this, but I suspect this is a common search term.
https://api.github.com/repos/pandas-dev/pandas/pulls/28075
2019-08-21T20:30:56Z
2019-09-28T09:14:07Z
2019-09-28T09:14:06Z
2019-09-28T09:14:07Z
BUG: iter with readonly values, closes #28055
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index b658a6efbd1a1..eb54961309e8e 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -29,6 +29,7 @@ Datetimelike - Bug in :func:`to_datetime` where passing a timezone-naive :class:`DatetimeArray` or :class:`DatetimeIndex` and ``utc=True`` would incorrectly return a timezone-naive result (:issue:`27733`) - Bug in :meth:`Period.to_timestamp` where a :class:`Period` outside the :class:`Timestamp` implementation bounds (roughly 1677-09-21 to 2262-04-11) would return an incorrect :class:`Timestamp` instead of raising ``OutOfBoundsDatetime`` (:issue:`19643`) +- Bug in iterating over :class:`DatetimeIndex` when the underlying data is read-only (:issue:`28055`) Timezones ^^^^^^^^^ diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 4e49f660f5e19..01e500a80dcc4 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -71,7 +71,7 @@ cdef inline object create_time_from_ts( @cython.wraparound(False) @cython.boundscheck(False) -def ints_to_pydatetime(int64_t[:] arr, object tz=None, object freq=None, +def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None, str box="datetime"): """ Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index 4ea32359b8d4a..ab3107a0798e5 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -377,3 +377,11 @@ def test_nanosecond_field(self): dti = DatetimeIndex(np.arange(10)) tm.assert_index_equal(dti.nanosecond, pd.Index(np.arange(10, dtype=np.int64))) + + +def test_iter_readonly(): + # GH#28055 ints_to_pydatetime with readonly array + arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")]) + arr.setflags(write=False) + dti = pd.to_datetime(arr) + list(dti)
@TomAugspurger wanted to get this in under the wire? - [x] closes #28055 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28074
2019-08-21T20:20:23Z
2019-08-21T20:54:42Z
2019-08-21T20:54:42Z
2019-08-21T21:35:23Z
DOC: Update whatsnew
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 86d6db01c10c2..b658a6efbd1a1 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -1,56 +1,43 @@ .. _whatsnew_0251: -What's new in 0.25.1 (July XX, 2019) ------------------------------------- +What's new in 0.25.1 (August 21, 2019) +-------------------------------------- -Enhancements -~~~~~~~~~~~~ - - -.. _whatsnew_0251.enhancements.other: +These are the changes in pandas 0.25.1. See :ref:`release` for a full changelog +including other versions of pandas. -Other enhancements -^^^^^^^^^^^^^^^^^^ +I/O and LZMA +~~~~~~~~~~~~ -- -- -- +Some users may unknowingly have an incomplete Python installation lacking the `lzma` module from the standard library. In this case, `import pandas` failed due to an `ImportError` (:issue: `27575`). +Pandas will now warn, rather than raising an `ImportError` if the `lzma` module is not present. Any subsequent attempt to use `lzma` methods will raise a `RuntimeError`. +A possible fix for the lack of the `lzma` module is to ensure you have the necessary libraries and then re-install Python. +For example, on MacOS installing Python with `pyenv` may lead to an incomplete Python installation due to unmet system dependencies at compilation time (like `xz`). Compilation will succeed, but Python might fail at run time. The issue can be solved by installing the necessary dependencies and then re-installing Python. .. 
_whatsnew_0251.bug_fixes: Bug fixes ~~~~~~~~~ - Categorical ^^^^^^^^^^^ -- Bug in :meth:`Categorical.fillna` would replace all values, not just those that are ``NaN`` (:issue:`26215`) -- +- Bug in :meth:`Categorical.fillna` that would replace all values, not just those that are ``NaN`` (:issue:`26215`) Datetimelike ^^^^^^^^^^^^ + - Bug in :func:`to_datetime` where passing a timezone-naive :class:`DatetimeArray` or :class:`DatetimeIndex` and ``utc=True`` would incorrectly return a timezone-naive result (:issue:`27733`) - Bug in :meth:`Period.to_timestamp` where a :class:`Period` outside the :class:`Timestamp` implementation bounds (roughly 1677-09-21 to 2262-04-11) would return an incorrect :class:`Timestamp` instead of raising ``OutOfBoundsDatetime`` (:issue:`19643`) -- -- - -Timedelta -^^^^^^^^^ - -- -- -- Timezones ^^^^^^^^^ - Bug in :class:`Index` where a numpy object array with a timezone aware :class:`Timestamp` and ``np.nan`` would not return a :class:`DatetimeIndex` (:issue:`27011`) -- -- Numeric ^^^^^^^ + - Bug in :meth:`Series.interpolate` when using a timezone aware :class:`DatetimeIndex` (:issue:`27548`) - Bug when printing negative floating point complex numbers would raise an ``IndexError`` (:issue:`27484`) - Bug where :class:`DataFrame` arithmetic operators such as :meth:`DataFrame.mul` with a :class:`Series` with axis=1 would raise an ``AttributeError`` on :class:`DataFrame` larger than the minimum threshold to invoke numexpr (:issue:`27636`) @@ -60,23 +47,11 @@ Conversion ^^^^^^^^^^ - Improved the warnings for the deprecated methods :meth:`Series.real` and :meth:`Series.imag` (:issue:`27610`) -- -- - -Strings -^^^^^^^ - -- -- -- - Interval ^^^^^^^^ + - Bug in :class:`IntervalIndex` where `dir(obj)` would raise ``ValueError`` (:issue:`27571`) -- -- -- Indexing ^^^^^^^^ @@ -85,38 +60,26 @@ Indexing - Break reference cycle involving :class:`Index` and other index classes to allow garbage collection of index objects without running the GC. 
(:issue:`27585`, :issue:`27840`) - Fix regression in assigning values to a single column of a DataFrame with a ``MultiIndex`` columns (:issue:`27841`). - Fix regression in ``.ix`` fallback with an ``IntervalIndex`` (:issue:`27865`). -- Missing ^^^^^^^ -- Bug in :func:`pandas.isnull` or :func:`pandas.isna` when the input is a type e.g. `type(pandas.Series())` (:issue:`27482`) -- -- - -MultiIndex -^^^^^^^^^^ - -- -- -- +- Bug in :func:`pandas.isnull` or :func:`pandas.isna` when the input is a type e.g. ``type(pandas.Series())`` (:issue:`27482`) I/O ^^^ + - Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`) - Better error message when a negative header is passed in :func:`pandas.read_csv` (:issue:`27779`) -- Follow the ``min_rows`` display option (introduced in v0.25.0) correctly in the html repr in the notebook (:issue:`27991`). -- +- Follow the ``min_rows`` display option (introduced in v0.25.0) correctly in the HTML repr in the notebook (:issue:`27991`). Plotting ^^^^^^^^ -- Added a pandas_plotting_backends entrypoint group for registering plot backends. See :ref:`extending.plotting-backends` for more (:issue:`26747`). +- Added a ``pandas_plotting_backends`` entrypoint group for registering plot backends. See :ref:`extending.plotting-backends` for more (:issue:`26747`). - Fixed the re-instatement of Matplotlib datetime converters after calling - `pandas.plotting.deregister_matplotlib_converters()` (:issue:`27481`). -- + :meth:`pandas.plotting.deregister_matplotlib_converters` (:issue:`27481`). - Fix compatibility issue with matplotlib when passing a pandas ``Index`` to a plot call (:issue:`27775`). 
-- Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -125,7 +88,6 @@ Groupby/resample/rolling - Bug in :meth:`pandas.core.groupby.GroupBy.nth` where ``observed=False`` was being ignored for Categorical groupers (:issue:`26385`) - Bug in windowing over read-only arrays (:issue:`27766`) - Fixed segfault in `pandas.core.groupby.DataFrameGroupBy.quantile` when an invalid quantile was passed (:issue:`27470`) -- Reshaping ^^^^^^^^^ @@ -137,40 +99,13 @@ Reshaping Sparse ^^^^^^ -- Bug in reductions for :class:`Series` with Sparse dtypes (:issue:`27080`) -- -- -- - - -Build Changes -^^^^^^^^^^^^^ - -- -- -- - -ExtensionArray -^^^^^^^^^^^^^^ -- -- -- +- Bug in reductions for :class:`Series` with Sparse dtypes (:issue:`27080`) Other ^^^^^ -- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when replacing timezone-aware timestamps using a dict-like replacer (:issue:`27720`) -- -- -- - -I/O and LZMA -~~~~~~~~~~~~ -Some users may unknowingly have an incomplete Python installation, which lacks the `lzma` module from the standard library. In this case, `import pandas` failed due to an `ImportError` (:issue: `27575`). -Pandas will now warn, rather than raising an `ImportError` if the `lzma` module is not present. Any subsequent attempt to use `lzma` methods will raise a `RuntimeError`. -A possible fix for the lack of the `lzma` module is to ensure you have the necessary libraries and then re-install Python. -For example, on MacOS installing Python with `pyenv` may lead to an incomplete Python installation due to unmet system dependencies at compilation time (like `xz`). Compilation will succeed, but Python might fail at run time. The issue can be solved by installing the necessary dependencies and then re-installing Python. +- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when replacing timezone-aware timestamps using a dict-like replacer (:issue:`27720`) .. _whatsnew_0.251.contributors:
https://api.github.com/repos/pandas-dev/pandas/pulls/28073
2019-08-21T19:39:00Z
2019-08-21T20:52:03Z
2019-08-21T20:52:03Z
2019-08-21T21:09:16Z
TST: non-strict xfail for period test
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 6da4d556ea07e..a1de205afc0e2 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -1549,7 +1549,11 @@ def test_period_immutable(): @pytest.mark.xfail( - PY35, reason="Parsing as Period('0007-01-01', 'D') for reasons unknown", strict=True + # xpassing on MacPython with strict=False + # https://travis-ci.org/MacPython/pandas-wheels/jobs/574706922 + PY35, + reason="Parsing as Period('0007-01-01', 'D') for reasons unknown", + strict=False, ) def test_small_year_parsing(): per1 = Period("0001-01-07", "D")
This was XPASSing on the MacPython builds: https://travis-ci.org/MacPython/pandas-wheels/jobs/574706922
https://api.github.com/repos/pandas-dev/pandas/pulls/28072
2019-08-21T19:31:38Z
2019-08-21T20:53:10Z
2019-08-21T20:53:10Z
2019-08-21T20:53:13Z
Backport PR #28067 on branch 0.25.x (Set SHA for codecov upload)
diff --git a/ci/run_tests.sh b/ci/run_tests.sh index ee46da9f52eab..74c1cde325a02 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -51,8 +51,9 @@ do sh -c "$PYTEST_CMD; ret=\$?; [ \$ret = 5 ] && exit 0 || exit \$ret" if [[ "$COVERAGE" && $? == 0 ]]; then + SHA=`git rev-parse HEAD` echo "uploading coverage for $TYPE tests" - echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME" - bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME + echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME -C $SHA" + bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME -C `git rev-parse HEAD` fi done
Backport PR #28067: Set SHA for codecov upload
https://api.github.com/repos/pandas-dev/pandas/pulls/28070
2019-08-21T18:32:40Z
2019-08-21T19:43:58Z
null
2019-08-21T19:43:58Z
set sha
diff --git a/ci/run_tests.sh b/ci/run_tests.sh index ee46da9f52eab..74c1cde325a02 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -51,8 +51,9 @@ do sh -c "$PYTEST_CMD; ret=\$?; [ \$ret = 5 ] && exit 0 || exit \$ret" if [[ "$COVERAGE" && $? == 0 ]]; then + SHA=`git rev-parse HEAD` echo "uploading coverage for $TYPE tests" - echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME" - bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME + echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME -C $SHA" + bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME -C `git rev-parse HEAD` fi done
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28069
2019-08-21T17:41:31Z
2019-08-21T18:32:37Z
null
2019-08-21T18:32:37Z
Remove alias in pandas docs for numpy.random.randn
diff --git a/doc/source/conf.py b/doc/source/conf.py index 3ebc5d8b6333b..a4b7d97c2cf5e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -315,7 +315,6 @@ import numpy as np import pandas as pd - randn = np.random.randn np.random.seed(123456) np.set_printoptions(precision=4, suppress=True) pd.options.display.max_rows = 15
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28068
2019-08-21T17:32:05Z
2019-08-22T12:55:25Z
null
2019-08-22T12:55:25Z
Set SHA for codecov upload
diff --git a/ci/run_tests.sh b/ci/run_tests.sh index ee46da9f52eab..74c1cde325a02 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -51,8 +51,9 @@ do sh -c "$PYTEST_CMD; ret=\$?; [ \$ret = 5 ] && exit 0 || exit \$ret" if [[ "$COVERAGE" && $? == 0 ]]; then + SHA=`git rev-parse HEAD` echo "uploading coverage for $TYPE tests" - echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME" - bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME + echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME -C $SHA" + bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME -C `git rev-parse HEAD` fi done
Just a test.
https://api.github.com/repos/pandas-dev/pandas/pulls/28067
2019-08-21T17:11:08Z
2019-08-21T18:32:28Z
2019-08-21T18:32:28Z
2019-08-21T18:32:32Z
REF: do extract_array earlier in series arith/comparison ops
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 7e03b9544ee72..4fa2267e9af78 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -5,7 +5,7 @@ """ import datetime import operator -from typing import Any, Callable, Tuple +from typing import Any, Callable, Tuple, Union import numpy as np @@ -34,10 +34,11 @@ ABCIndexClass, ABCSeries, ABCSparseSeries, + ABCTimedeltaArray, + ABCTimedeltaIndex, ) from pandas.core.dtypes.missing import isna, notna -import pandas as pd from pandas._typing import ArrayLike from pandas.core.construction import array, extract_array from pandas.core.ops.array_ops import comp_method_OBJECT_ARRAY, define_na_arithmetic_op @@ -148,6 +149,8 @@ def maybe_upcast_for_op(obj, shape: Tuple[int, ...]): Be careful to call this *after* determining the `name` attribute to be attached to the result of the arithmetic operation. """ + from pandas.core.arrays import TimedeltaArray + if type(obj) is datetime.timedelta: # GH#22390 cast up to Timedelta to rely on Timedelta # implementation; otherwise operation against numeric-dtype @@ -157,12 +160,10 @@ def maybe_upcast_for_op(obj, shape: Tuple[int, ...]): if isna(obj): # wrapping timedelta64("NaT") in Timedelta returns NaT, # which would incorrectly be treated as a datetime-NaT, so - # we broadcast and wrap in a Series + # we broadcast and wrap in a TimedeltaArray + obj = obj.astype("timedelta64[ns]") right = np.broadcast_to(obj, shape) - - # Note: we use Series instead of TimedeltaIndex to avoid having - # to worry about catching NullFrequencyError. 
- return pd.Series(right) + return TimedeltaArray(right) # In particular non-nanosecond timedelta64 needs to be cast to # nanoseconds, or else we get undesired behavior like @@ -173,7 +174,7 @@ def maybe_upcast_for_op(obj, shape: Tuple[int, ...]): # GH#22390 Unfortunately we need to special-case right-hand # timedelta64 dtypes because numpy casts integer dtypes to # timedelta64 when operating with timedelta64 - return pd.TimedeltaIndex(obj) + return TimedeltaArray._from_sequence(obj) return obj @@ -520,13 +521,34 @@ def column_op(a, b): return result -def dispatch_to_extension_op(op, left, right): +def dispatch_to_extension_op( + op, + left: Union[ABCExtensionArray, np.ndarray], + right: Any, + keep_null_freq: bool = False, +): """ Assume that left or right is a Series backed by an ExtensionArray, apply the operator defined by op. + + Parameters + ---------- + op : binary operator + left : ExtensionArray or np.ndarray + right : object + keep_null_freq : bool, default False + Whether to re-raise a NullFrequencyError unchanged, as opposed to + catching and raising TypeError. + + Returns + ------- + ExtensionArray or np.ndarray + 2-tuple of these if op is divmod or rdivmod """ + # NB: left and right should already be unboxed, so neither should be + # a Series or Index. - if left.dtype.kind in "mM": + if left.dtype.kind in "mM" and isinstance(left, np.ndarray): # We need to cast datetime64 and timedelta64 ndarrays to # DatetimeArray/TimedeltaArray. But we avoid wrapping others in # PandasArray as that behaves poorly with e.g. IntegerArray. 
@@ -535,15 +557,15 @@ def dispatch_to_extension_op(op, left, right): # The op calls will raise TypeError if the op is not defined # on the ExtensionArray - # unbox Series and Index to arrays - new_left = extract_array(left, extract_numpy=True) - new_right = extract_array(right, extract_numpy=True) - try: - res_values = op(new_left, new_right) + res_values = op(left, right) except NullFrequencyError: # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError # on add/sub of integers (or int-like). We re-raise as a TypeError. + if keep_null_freq: + # TODO: remove keep_null_freq after Timestamp+int deprecation + # GH#22535 is enforced + raise raise TypeError( "incompatible type for a datetime/timedelta " "operation [{name}]".format(name=op.__name__) @@ -615,25 +637,29 @@ def wrapper(left, right): if isinstance(right, ABCDataFrame): return NotImplemented + keep_null_freq = isinstance( + right, + (ABCDatetimeIndex, ABCDatetimeArray, ABCTimedeltaIndex, ABCTimedeltaArray), + ) + left, right = _align_method_SERIES(left, right) res_name = get_op_result_name(left, right) - right = maybe_upcast_for_op(right, left.shape) - if should_extension_dispatch(left, right): - result = dispatch_to_extension_op(op, left, right) + lvalues = extract_array(left, extract_numpy=True) + rvalues = extract_array(right, extract_numpy=True) - elif is_timedelta64_dtype(right) or isinstance( - right, (ABCDatetimeArray, ABCDatetimeIndex) - ): - # We should only get here with td64 right with non-scalar values - # for right upcast by maybe_upcast_for_op - assert not isinstance(right, (np.timedelta64, np.ndarray)) - result = op(left._values, right) + rvalues = maybe_upcast_for_op(rvalues, lvalues.shape) - else: - lvalues = extract_array(left, extract_numpy=True) - rvalues = extract_array(right, extract_numpy=True) + if should_extension_dispatch(lvalues, rvalues): + result = dispatch_to_extension_op(op, lvalues, rvalues, keep_null_freq) + + elif is_timedelta64_dtype(rvalues) or 
isinstance(rvalues, ABCDatetimeArray): + # We should only get here with td64 rvalues with non-scalar values + # for rvalues upcast by maybe_upcast_for_op + assert not isinstance(rvalues, (np.timedelta64, np.ndarray)) + result = dispatch_to_extension_op(op, lvalues, rvalues, keep_null_freq) + else: with np.errstate(all="ignore"): result = na_op(lvalues, rvalues) @@ -708,25 +734,25 @@ def wrapper(self, other, axis=None): if len(self) != len(other): raise ValueError("Lengths must match to compare") - if should_extension_dispatch(self, other): - res_values = dispatch_to_extension_op(op, self, other) + lvalues = extract_array(self, extract_numpy=True) + rvalues = extract_array(other, extract_numpy=True) - elif is_scalar(other) and isna(other): + if should_extension_dispatch(lvalues, rvalues): + res_values = dispatch_to_extension_op(op, lvalues, rvalues) + + elif is_scalar(rvalues) and isna(rvalues): # numpy does not like comparisons vs None if op is operator.ne: - res_values = np.ones(len(self), dtype=bool) + res_values = np.ones(len(lvalues), dtype=bool) else: - res_values = np.zeros(len(self), dtype=bool) + res_values = np.zeros(len(lvalues), dtype=bool) else: - lvalues = extract_array(self, extract_numpy=True) - rvalues = extract_array(other, extract_numpy=True) - with np.errstate(all="ignore"): res_values = na_op(lvalues, rvalues) if is_scalar(res_values): raise TypeError( - "Could not compare {typ} type with Series".format(typ=type(other)) + "Could not compare {typ} type with Series".format(typ=type(rvalues)) ) result = self._constructor(res_values, index=self.index)
With this, the middle third of _arith_method_SERIES and _comp_method_SERIES are array-specific and can be refactored out (separate step) to become a) block-wise implementation for DataFrame and b) PandasArray implementation. This also simplifies the NullFrequencyError handling nicely.
https://api.github.com/repos/pandas-dev/pandas/pulls/28066
2019-08-21T17:09:14Z
2019-09-02T21:09:20Z
2019-09-02T21:09:20Z
2019-09-02T22:54:55Z
CI: disable codecov
diff --git a/ci/run_tests.sh b/ci/run_tests.sh index 74c1cde325a02..27d3fcb4cf563 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -50,10 +50,10 @@ do # if no tests are found (the case of "single and slow"), pytest exits with code 5, and would make the script fail, if not for the below code sh -c "$PYTEST_CMD; ret=\$?; [ \$ret = 5 ] && exit 0 || exit \$ret" - if [[ "$COVERAGE" && $? == 0 ]]; then - SHA=`git rev-parse HEAD` - echo "uploading coverage for $TYPE tests" - echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME -C $SHA" - bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME -C `git rev-parse HEAD` - fi + # 2019-08-21 disabling because this is hitting HTTP 400 errors GH#27602 + # if [[ "$COVERAGE" && $? == 0 && "$TRAVIS_BRANCH" == "master" ]]; then + # echo "uploading coverage for $TYPE tests" + # echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME" + # bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME + # fi done
xref #27602
https://api.github.com/repos/pandas-dev/pandas/pulls/28065
2019-08-21T16:53:22Z
2019-08-21T20:23:15Z
2019-08-21T20:23:15Z
2019-08-21T21:09:16Z
CI: Test azure
Testing that an azure CI build is triggered (and passes) for 0.25.x
https://api.github.com/repos/pandas-dev/pandas/pulls/28064
2019-08-21T16:08:39Z
2019-08-21T17:09:15Z
null
2019-08-21T18:41:16Z
CI: Test azure
In the azure UI we had set up a branch filter to only build on master. This is testing that we still build for PRs against master.
https://api.github.com/repos/pandas-dev/pandas/pulls/28063
2019-08-21T16:06:11Z
2019-08-21T16:06:36Z
null
2019-08-21T16:07:13Z
PERF: cython-optimized datetime constructor
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 0a3f4ed3cc91d..e22c436b49809 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -10,6 +10,7 @@ import pytz # stdlib datetime imports from datetime import time as datetime_time from cpython.datetime cimport (datetime, tzinfo, + datetime_new, PyDateTime_Check, PyDate_Check, PyDateTime_IMPORT) PyDateTime_IMPORT @@ -432,9 +433,10 @@ cdef _TSObject create_tsobject_tz_using_offset(npy_datetimestruct dts, return obj # Keep the converter same as PyDateTime's - dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day, - obj.dts.hour, obj.dts.min, obj.dts.sec, - obj.dts.us, obj.tzinfo) + # datetime_new is a cython-fastpath for the datetime constructor + dt = datetime_new(obj.dts.year, obj.dts.month, obj.dts.day, + obj.dts.hour, obj.dts.min, obj.dts.sec, + obj.dts.us, obj.tzinfo) obj = convert_datetime_to_tsobject( dt, tz, nanos=obj.dts.ps // 1000) return obj @@ -684,7 +686,8 @@ def normalize_date(dt: object) -> datetime: return dt.replace(hour=0, minute=0, second=0, microsecond=0) # TODO: Make sure DST crossing is handled correctly here elif PyDate_Check(dt): - return datetime(dt.year, dt.month, dt.day) + # datetime_new is a cython-fastpath for the datetime constructor + return datetime_new(dt.year, dt.month, dt.day, 0, 0, 0, 0, None) else: raise TypeError('Unrecognized type: %s' % type(dt)) diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index dd0c6fc75b06f..db22e0c55c122 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -5,7 +5,7 @@ import cython from cython import Py_ssize_t from cpython.datetime cimport ( - PyDateTime_IMPORT, PyDelta_Check, datetime, tzinfo) + PyDateTime_IMPORT, PyDelta_Check, datetime, tzinfo, datetime_new) PyDateTime_IMPORT import pytz @@ -468,8 +468,8 @@ cdef int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, bint to_utc=True): 
datetime dt dt64_to_dtstruct(val, &dts) - dt = datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us) + dt = datetime_new(dts.year, dts.month, dts.day, + dts.hour, dts.min, dts.sec, dts.us, None) # get_utcoffset (tz.utcoffset under the hood) only makes sense if datetime # is _wall time_, so if val is a UTC timestamp convert to wall time if not to_utc:
Calling the datetime constructor goes through python space, so the C code generated by `dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day, obj.dts.hour, obj.dts.min, obj.dts.sec, obj.dts.us, obj.tzinfo)` in master is: ``` __pyx_t_1 = __Pyx_PyInt_From_npy_int64(__pyx_v_obj->dts.year); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_From_npy_int32(__pyx_v_obj->dts.month); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = __Pyx_PyInt_From_npy_int32(__pyx_v_obj->dts.day); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_4 = __Pyx_PyInt_From_npy_int32(__pyx_v_obj->dts.hour); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyInt_From_npy_int32(__pyx_v_obj->dts.min); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = __Pyx_PyInt_From_npy_int32(__pyx_v_obj->dts.sec); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_11 = __Pyx_PyInt_From_npy_int32(__pyx_v_obj->dts.us); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 437, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_12 = PyTuple_New(8); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_12, 2, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_12, 3, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_12, 4, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_12, 5, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_12, 6, __pyx_t_11); __Pyx_INCREF(__pyx_v_obj->tzinfo); __Pyx_GIVEREF(__pyx_v_obj->tzinfo); PyTuple_SET_ITEM(__pyx_t_12, 7, __pyx_v_obj->tzinfo); 
__pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_7 = 0; __pyx_t_4 = 0; __pyx_t_2 = 0; __pyx_t_5 = 0; __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyObject_Call(((PyObject *)__pyx_ptype_7cpython_8datetime_datetime), __pyx_t_12, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __pyx_v_dt = ((PyDateTime_DateTime *)__pyx_t_11); __pyx_t_11 = 0; ``` Using `datetime_new` we instead get: ``` __pyx_t_1 = __pyx_v_obj->tzinfo; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = __pyx_f_7cpython_8datetime_datetime_new(__pyx_v_obj->dts.year, __pyx_v_obj->dts.month, __pyx_v_obj->dts.day, __pyx_v_obj->dts.hour, __pyx_v_obj->dts.min, __pyx_v_obj->dts.sec, __pyx_v_obj->dts.us, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 437, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_7cpython_8datetime_datetime))))) __PYX_ERR(0, 437, __pyx_L1_error) __pyx_v_dt = ((PyDateTime_DateTime *)__pyx_t_3); __pyx_t_3 = 0; ``` Still need to run asv to see if this actually matters.
https://api.github.com/repos/pandas-dev/pandas/pulls/28061
2019-08-21T15:57:00Z
2019-08-22T01:05:04Z
null
2020-04-05T17:35:53Z
Update groupby
- [ ] closes #20519 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28060
2019-08-21T15:26:38Z
2019-08-22T17:21:07Z
null
2019-08-22T17:21:07Z
BUG: Correct the previous bug fixing on xlim for plotting
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index fbca57206e163..6ff3f28440303 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -33,6 +33,8 @@ from pandas.plotting._matplotlib.style import _get_standard_colors from pandas.plotting._matplotlib.tools import ( _flatten, + _get_all_lines, + _get_xlim, _handle_shared_axes, _subplots, format_date_labels, @@ -1099,8 +1101,13 @@ def _make_plot(self): ) self._add_legend_handle(newlines[0], label, index=i) - # GH27686 set_xlim will truncate xaxis to fixed space - ax.relim() + if self._is_ts_plot(): + + # reset of xlim should be used for ts data + # TODO: GH28021, should find a way to change view limit on xaxis + lines = _get_all_lines(ax) + left, right = _get_xlim(lines) + ax.set_xlim(left, right) @classmethod def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds): diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index fd2913ca51ac3..67fa79ad5da8c 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -356,3 +356,24 @@ def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=Non if yrot is not None: plt.setp(ax.get_yticklabels(), rotation=yrot) return axes + + +def _get_all_lines(ax): + lines = ax.get_lines() + + if hasattr(ax, "right_ax"): + lines += ax.right_ax.get_lines() + + if hasattr(ax, "left_ax"): + lines += ax.left_ax.get_lines() + + return lines + + +def _get_xlim(lines): + left, right = np.inf, -np.inf + for l in lines: + x = l.get_xdata(orig=False) + left = min(np.nanmin(x), left) + right = max(np.nanmax(x), right) + return left, right diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index be87929b4545a..e2b7f2819f957 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -419,8 +419,6 @@ def 
test_get_finder(self): assert conv.get_finder("A") == conv._annual_finder assert conv.get_finder("W") == conv._daily_finder - # TODO: The finder should be retested due to wrong xlim values on x-axis - @pytest.mark.xfail(reason="TODO: check details in GH28021") @pytest.mark.slow def test_finder_daily(self): day_lst = [10, 40, 252, 400, 950, 2750, 10000] @@ -444,8 +442,6 @@ def test_finder_daily(self): assert rs1 == xpl1 assert rs2 == xpl2 - # TODO: The finder should be retested due to wrong xlim values on x-axis - @pytest.mark.xfail(reason="TODO: check details in GH28021") @pytest.mark.slow def test_finder_quarterly(self): yrs = [3.5, 11] @@ -469,8 +465,6 @@ def test_finder_quarterly(self): assert rs1 == xpl1 assert rs2 == xpl2 - # TODO: The finder should be retested due to wrong xlim values on x-axis - @pytest.mark.xfail(reason="TODO: check details in GH28021") @pytest.mark.slow def test_finder_monthly(self): yrs = [1.15, 2.5, 4, 11] @@ -504,8 +498,6 @@ def test_finder_monthly_long(self): xp = Period("1989Q1", "M").ordinal assert rs == xp - # TODO: The finder should be retested due to wrong xlim values on x-axis - @pytest.mark.xfail(reason="TODO: check details in GH28021") @pytest.mark.slow def test_finder_annual(self): xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170] @@ -530,7 +522,7 @@ def test_finder_minutely(self): _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() - rs = xaxis.get_majorticklocs()[1] + rs = xaxis.get_majorticklocs()[0] xp = Period("1/1/1999", freq="Min").ordinal assert rs == xp @@ -542,7 +534,7 @@ def test_finder_hourly(self): _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() - rs = xaxis.get_majorticklocs()[1] + rs = xaxis.get_majorticklocs()[0] xp = Period("1/1/1999", freq="H").ordinal assert rs == xp @@ -1418,9 +1410,7 @@ def test_plot_outofbounds_datetime(self): def test_format_timedelta_ticks_narrow(self): - expected_labels = [ - "00:00:00.0000000{:0>2d}".format(i) for i in np.arange(0, 10, 2) - ] + 
expected_labels = ["00:00:00.0000000{:0>2d}".format(i) for i in np.arange(10)] rng = timedelta_range("0", periods=10, freq="ns") df = DataFrame(np.random.randn(len(rng), 3), rng) @@ -1430,8 +1420,8 @@ def test_format_timedelta_ticks_narrow(self): labels = ax.get_xticklabels() result_labels = [x.get_text() for x in labels] - assert (len(result_labels) - 2) == len(expected_labels) - assert result_labels[1:-1] == expected_labels + assert len(result_labels) == len(expected_labels) + assert result_labels == expected_labels def test_format_timedelta_ticks_wide(self): expected_labels = [ @@ -1454,8 +1444,8 @@ def test_format_timedelta_ticks_wide(self): labels = ax.get_xticklabels() result_labels = [x.get_text() for x in labels] - assert (len(result_labels) - 2) == len(expected_labels) - assert result_labels[1:-1] == expected_labels + assert len(result_labels) == len(expected_labels) + assert result_labels == expected_labels def test_timedelta_plot(self): # test issue #8711
- [ ] xref: #28021 #27993 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28059
2019-08-21T12:42:37Z
2019-08-21T15:18:10Z
2019-08-21T15:18:10Z
2019-08-21T15:18:11Z
Backport PR #28029 on branch 0.25.x (DOC: Change document code prun in a row)
diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst index c15991fabfd3b..fa2fe1ad3989b 100644 --- a/doc/source/user_guide/enhancingperf.rst +++ b/doc/source/user_guide/enhancingperf.rst @@ -243,9 +243,9 @@ We've gotten another big improvement. Let's check again where the time is spent: .. ipython:: python - %prun -l 4 apply_integrate_f(df['a'].to_numpy(), - df['b'].to_numpy(), - df['N'].to_numpy()) + %%prun -l 4 apply_integrate_f(df['a'].to_numpy(), + df['b'].to_numpy(), + df['N'].to_numpy()) As one might expect, the majority of the time is now spent in ``apply_integrate_f``, so if we wanted to make anymore efficiencies we must continue to concentrate our
Backport PR #28029: DOC: Change document code prun in a row
https://api.github.com/repos/pandas-dev/pandas/pulls/28054
2019-08-21T06:43:34Z
2019-08-21T17:08:05Z
2019-08-21T17:08:05Z
2019-08-21T17:08:05Z
DOC: fix punctuation in Timestamp/Timedelta docstrings
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 020d1acf0b4ce..9306338029b73 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -545,7 +545,7 @@ class NaTType(_NaT): """) round = _make_nat_func('round', # noqa:E128 """ - Round the Timestamp to the specified resolution + Round the Timestamp to the specified resolution. Parameters ---------- @@ -583,7 +583,7 @@ default 'raise' """) floor = _make_nat_func('floor', # noqa:E128 """ - return a new Timestamp floored to this resolution + return a new Timestamp floored to this resolution. Parameters ---------- @@ -617,7 +617,7 @@ default 'raise' """) ceil = _make_nat_func('ceil', # noqa:E128 """ - return a new Timestamp ceiled to this resolution + return a new Timestamp ceiled to this resolution. Parameters ---------- @@ -729,7 +729,7 @@ default 'raise' """) replace = _make_nat_func('replace', # noqa:E128 """ - implements datetime.replace, handles nanoseconds + implements datetime.replace, handles nanoseconds. Parameters ---------- diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index d24aafae0967d..52911f9fbcccd 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1150,7 +1150,7 @@ cdef class _Timedelta(timedelta): """ Format Timedelta as ISO 8601 Duration like ``P[n]Y[n]M[n]DT[n]H[n]M[n]S``, where the ``[n]`` s are replaced by the - values. See https://en.wikipedia.org/wiki/ISO_8601#Durations + values. See https://en.wikipedia.org/wiki/ISO_8601#Durations. .. versionadded:: 0.20.0 @@ -1314,7 +1314,7 @@ class Timedelta(_Timedelta): def round(self, freq): """ - Round the Timedelta to the specified resolution + Round the Timedelta to the specified resolution. Parameters ---------- @@ -1332,7 +1332,7 @@ class Timedelta(_Timedelta): def floor(self, freq): """ - return a new Timedelta floored to this resolution + return a new Timedelta floored to this resolution. 
Parameters ---------- @@ -1342,7 +1342,7 @@ class Timedelta(_Timedelta): def ceil(self, freq): """ - return a new Timedelta ceiled to this resolution + return a new Timedelta ceiled to this resolution. Parameters ---------- diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index c8c6efda30fae..6ca39d83afd25 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -441,7 +441,7 @@ class Timestamp(_Timestamp): def round(self, freq, ambiguous='raise', nonexistent='raise'): """ - Round the Timestamp to the specified resolution + Round the Timestamp to the specified resolution. Parameters ---------- @@ -483,7 +483,7 @@ default 'raise' def floor(self, freq, ambiguous='raise', nonexistent='raise'): """ - return a new Timestamp floored to this resolution + return a new Timestamp floored to this resolution. Parameters ---------- @@ -519,7 +519,7 @@ default 'raise' def ceil(self, freq, ambiguous='raise', nonexistent='raise'): """ - return a new Timestamp ceiled to this resolution + return a new Timestamp ceiled to this resolution. Parameters ---------- @@ -556,7 +556,7 @@ default 'raise' @property def tz(self): """ - Alias for tzinfo + Alias for tzinfo. """ return self.tzinfo @@ -754,7 +754,7 @@ default 'raise' def resolution(self): """ Return resolution describing the smallest difference between two - times that can be represented by Timestamp object_state + times that can be represented by Timestamp object_state. """ # GH#21336, GH#21365 return Timedelta(nanoseconds=1) @@ -893,7 +893,7 @@ default 'raise' hour=None, minute=None, second=None, microsecond=None, nanosecond=None, tzinfo=object, fold=0): """ - implements datetime.replace, handles nanoseconds + implements datetime.replace, handles nanoseconds. Parameters ----------
- [ ] xref #27977 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Add periods to Timestamp and Timedelta doctrings for the following attributes and methods: ``` pandas.Timestamp.resolution pandas.Timestamp.tz pandas.Timestamp.ceil pandas.Timestamp.combine pandas.Timestamp.floor pandas.Timestamp.fromordinal pandas.Timestamp.replace pandas.Timestamp.round pandas.Timestamp.weekday pandas.Timedelta.ceil pandas.Timedelta.floor pandas.Timedelta.isoformat pandas.Timedelta.round ``` I did not find the method for `pandas.Timestamp.isoweekday` this in the Timestamp source code. Was this deprecated ? @datapythonista
https://api.github.com/repos/pandas-dev/pandas/pulls/28053
2019-08-21T05:16:45Z
2019-09-28T07:39:08Z
2019-09-28T07:39:08Z
2019-09-28T07:39:09Z
Make DataFrame.to_string output full content by default
diff --git a/doc/source/user_guide/options.rst b/doc/source/user_guide/options.rst index 1f1dff417e68f..a6491c6645613 100644 --- a/doc/source/user_guide/options.rst +++ b/doc/source/user_guide/options.rst @@ -353,7 +353,7 @@ display.max_colwidth 50 The maximum width in charac a column in the repr of a pandas data structure. When the column overflows, a "..." placeholder is embedded in - the output. + the output. 'None' value means unlimited. display.max_info_columns 100 max_info_columns is used in DataFrame.info method to decide if per column information will be printed. diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 5b9e3a7dbad06..c78e27f098f13 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -21,6 +21,8 @@ including other versions of pandas. Enhancements ~~~~~~~~~~~~ +- :meth:`DataFrame.to_string` added the ``max_colwidth`` parameter to control when wide columns are truncated (:issue:`9784`) +- .. _whatsnew_1000.enhancements.other: @@ -191,6 +193,7 @@ I/O - Bug in :meth:`DataFrame.to_json` where using a Tuple as a column or index value and using ``orient="columns"`` or ``orient="index"`` would produce invalid JSON (:issue:`20500`) - Improve infinity parsing. :meth:`read_csv` now interprets ``Infinity``, ``+Infinity``, ``-Infinity`` as floating point values (:issue:`10065`) - Bug in :meth:`DataFrame.to_csv` where values were truncated when the length of ``na_rep`` was shorter than the text input data. 
(:issue:`25099`) +- Bug in :func:`DataFrame.to_string` where values were truncated using display options instead of outputting the full content (:issue:`9784`) Plotting ^^^^^^^^ diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index dfc80140433f8..bc2eb3511629d 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -148,10 +148,10 @@ def use_numexpr_cb(key): """ max_colwidth_doc = """ -: int +: int or None The maximum width in characters of a column in the repr of a pandas data structure. When the column overflows, a "..." - placeholder is embedded in the output. + placeholder is embedded in the output. A 'None' value means unlimited. """ colheader_justify_doc = """ @@ -340,7 +340,9 @@ def is_terminal(): validator=is_instance_factory([type(None), int]), ) cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int) - cf.register_option("max_colwidth", 50, max_colwidth_doc, validator=is_int) + cf.register_option( + "max_colwidth", 50, max_colwidth_doc, validator=is_nonnegative_int + ) if is_terminal(): max_cols = 0 # automatically determine optimal number of columns else: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f1ed3a125f60c..44d3d840016fe 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -641,6 +641,7 @@ def __repr__(self): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") + max_colwidth = get_option("display.max_colwidth") show_dimensions = get_option("display.show_dimensions") if get_option("display.expand_frame_repr"): width, _ = console.get_console_size() @@ -652,6 +653,7 @@ def __repr__(self): min_rows=min_rows, max_cols=max_cols, line_width=width, + max_colwidth=max_colwidth, show_dimensions=show_dimensions, ) @@ -730,12 +732,17 @@ def to_string( show_dimensions=False, decimal=".", line_width=None, + max_colwidth=None, ): """ Render a DataFrame to a console-friendly tabular output. 
%(shared_params)s line_width : int, optional Width to wrap a line in characters. + max_colwidth : int, optional + Max width to truncate each column in characters. By default, no limit. + + .. versionadded:: 1.0.0 %(returns)s See Also -------- @@ -752,26 +759,29 @@ def to_string( 2 3 6 """ - formatter = fmt.DataFrameFormatter( - self, - columns=columns, - col_space=col_space, - na_rep=na_rep, - formatters=formatters, - float_format=float_format, - sparsify=sparsify, - justify=justify, - index_names=index_names, - header=header, - index=index, - min_rows=min_rows, - max_rows=max_rows, - max_cols=max_cols, - show_dimensions=show_dimensions, - decimal=decimal, - line_width=line_width, - ) - return formatter.to_string(buf=buf) + from pandas import option_context + + with option_context("display.max_colwidth", max_colwidth): + formatter = fmt.DataFrameFormatter( + self, + columns=columns, + col_space=col_space, + na_rep=na_rep, + formatters=formatters, + float_format=float_format, + sparsify=sparsify, + justify=justify, + index_names=index_names, + header=header, + index=index, + min_rows=min_rows, + max_rows=max_rows, + max_cols=max_cols, + show_dimensions=show_dimensions, + decimal=decimal, + line_width=line_width, + ) + return formatter.to_string(buf=buf) # ---------------------------------------------------------------------- diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index 76c01535a26e7..518b940ec5da3 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -131,7 +131,7 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover if isinstance(obj, ABCDataFrame): # str(df) has various unhelpful defaults, like truncation - with option_context("display.max_colwidth", 999999): + with option_context("display.max_colwidth", None): objstr = obj.to_string(**kwargs) else: objstr = str(obj) diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 8c4a7f4a1213d..50fa4796f8d72 100644 --- 
a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -377,7 +377,7 @@ def _write_header(self, indent: int) -> None: self.write("</thead>", indent) def _get_formatted_values(self) -> Dict[int, List[str]]: - with option_context("display.max_colwidth", 999999): + with option_context("display.max_colwidth", None): fmt_values = {i: self.fmt._format_col(i) for i in range(self.ncols)} return fmt_values diff --git a/pandas/tests/config/test_config.py b/pandas/tests/config/test_config.py index efaeb7b1471ec..51640641c78e6 100644 --- a/pandas/tests/config/test_config.py +++ b/pandas/tests/config/test_config.py @@ -218,6 +218,7 @@ def test_validation(self): self.cf.set_option("a", 2) # int is_int self.cf.set_option("b.c", "wurld") # str is_str self.cf.set_option("d", 2) + self.cf.set_option("d", None) # non-negative int can be None # None not is_int with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index c0451a0672c89..454e2afb8abe0 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -527,6 +527,45 @@ def test_str_max_colwidth(self): "1 foo bar stuff 1" ) + def test_to_string_truncate(self): + # GH 9784 - dont truncate when calling DataFrame.to_string + df = pd.DataFrame( + [ + { + "a": "foo", + "b": "bar", + "c": "let's make this a very VERY long line that is longer " + "than the default 50 character limit", + "d": 1, + }, + {"a": "foo", "b": "bar", "c": "stuff", "d": 1}, + ] + ) + df.set_index(["a", "b", "c"]) + assert df.to_string() == ( + " a b " + " c d\n" + "0 foo bar let's make this a very VERY long line t" + "hat is longer than the default 50 character limit 1\n" + "1 foo bar " + " stuff 1" + ) + with option_context("max_colwidth", 20): + # the display option has no effect on the to_string method + assert df.to_string() == ( + " a b " + " c d\n" + "0 foo bar let's make this a very VERY long line t" + "hat is longer than the 
default 50 character limit 1\n" + "1 foo bar " + " stuff 1" + ) + assert df.to_string(max_colwidth=20) == ( + " a b c d\n" + "0 foo bar let's make this ... 1\n" + "1 foo bar stuff 1" + ) + def test_auto_detect(self): term_width, term_height = get_terminal_size() fac = 1.05 # Arbitrary large factor to exceed term width
I modeled this off of https://github.com/pandas-dev/pandas/pull/24841. Some alternatives I considered: * Instead of setting the option_context here, we could wind the param into the depths of the formatter. I tried this, actually, and started finding a number of edge cases and bugs. I realized that the issue only occurs in a pretty narrow case - if the user is explicitly calling to_string - because most of the time, when representing a DataFrame, the user *will* want long strings truncated for readability. So I think the safest way is to do it at the top level without interfering with lower-level formatters. * Series.to_string() could arguably benefit from the same treatment, although that wasn't mentioned in the original issue (and I have never found the need to use it personally) so I didn't bring that in. Here's an example on a real dataset showing long columns preserved in a text file produced by to_string(): ![Screen Shot 2019-08-20 at 10 21 06 PM](https://user-images.githubusercontent.com/59923/63400279-d3f4ec80-c398-11e9-85cf-380a8981d726.png) Additional manual testing: * Main use case- by default, no limits and ignores the display options, but can still override: ``` >>> print(df.to_string()) A B 0 NaN NaN 1 -1.0000 foo 2 -2.1234 foooo 3 3.0000 fooooo 4 4.0000 bar >>> with option_context('display.max_colwidth', 5): ... print(df.to_string()) ... A B 0 NaN NaN 1 -1.0000 foo 2 -2.1234 foooo 3 3.0000 fooooo 4 4.0000 bar >>> print(df.to_string(max_colwidth=5)) A B 0 NaN NaN 1 -1... foo 2 -2... f... 3 3... f... 4 4... bar ``` * The string representation of DataFrame **does** still use the display options (so it's only the explicit ```to_string``` that doesn't: ``` >>> with option_context('display.max_colwidth', 5): ... print(str(df)) ... A B 0 NaN NaN 1 -1... foo 2 -2... f... 3 3... f... 4 4... bar ``` * The new parameter validates for None and positive ints, but rejects anything else: ``` >>> print(df.to_string(max_colwidth=-5)) ... 
raise ValueError(msg) ValueError: Value must be a nonnegative integer or None ``` - [ ] closes #9784 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28052
2019-08-21T03:10:21Z
2019-09-16T02:33:40Z
2019-09-16T02:33:40Z
2019-09-16T04:40:14Z
TST: fix compression tests when run without virtualenv/condaenv
diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index 16ca1109f266c..d68b6a1effaa0 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -1,6 +1,7 @@ import contextlib import os import subprocess +import sys import textwrap import warnings @@ -139,7 +140,7 @@ def test_with_missing_lzma(): import pandas """ ) - subprocess.check_output(["python", "-c", code]) + subprocess.check_output([sys.executable, "-c", code]) def test_with_missing_lzma_runtime(): @@ -156,4 +157,4 @@ def test_with_missing_lzma_runtime(): df.to_csv('foo.csv', compression='xz') """ ) - subprocess.check_output(["python", "-c", code]) + subprocess.check_output([sys.executable, "-c", code])
sys.executable is the pattern we use elsewhere in subprocess tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/28051
2019-08-21T02:46:32Z
2019-08-23T18:11:50Z
2019-08-23T18:11:50Z
2019-08-23T18:22:18Z
BUG: timedelta64(NaT) incorrectly treated as datetime in some dataframe ops
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index b02769322e013..173cc6b6b483c 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -180,7 +180,8 @@ Datetimelike - Addition and subtraction of integer or integer-dtype arrays with :class:`Timestamp` will now raise ``NullFrequencyError`` instead of ``ValueError`` (:issue:`28268`) - Bug in :class:`Series` and :class:`DataFrame` with integer dtype failing to raise ``TypeError`` when adding or subtracting a ``np.datetime64`` object (:issue:`28080`) - Bug in :class:`Week` with ``weekday`` incorrectly raising ``AttributeError`` instead of ``TypeError`` when adding or subtracting an invalid type (:issue:`28530`) - +- Bug in :class:`DataFrame` arithmetic operations when operating with a :class:`Series` with dtype `'timedelta64[ns]'` (:issue:`28049`) +- Timedelta ^^^^^^^^^ diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index ca4f35514f2a5..f53b5045abff3 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -498,8 +498,19 @@ def column_op(a, b): # in which case we specifically want to operate row-by-row assert right.index.equals(left.columns) - def column_op(a, b): - return {i: func(a.iloc[:, i], b.iloc[i]) for i in range(len(a.columns))} + if right.dtype == "timedelta64[ns]": + # ensure we treat NaT values as the correct dtype + # Note: we do not do this unconditionally as it may be lossy or + # expensive for EA dtypes. 
+ right = np.asarray(right) + + def column_op(a, b): + return {i: func(a.iloc[:, i], b[i]) for i in range(len(a.columns))} + + else: + + def column_op(a, b): + return {i: func(a.iloc[:, i], b.iloc[i]) for i in range(len(a.columns))} elif isinstance(right, ABCSeries): assert right.index.equals(left.index) # Handle other cases later diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 706bc122c6d9e..fc3640503e385 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -457,6 +457,16 @@ def test_arith_flex_zero_len_raises(self): class TestFrameArithmetic: + def test_td64_op_nat_casting(self): + # Make sure we don't accidentally treat timedelta64(NaT) as datetime64 + # when calling dispatch_to_series in DataFrame arithmetic + ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]") + df = pd.DataFrame([[1, 2], [3, 4]]) + + result = df * ser + expected = pd.DataFrame({0: ser, 1: ser}) + tm.assert_frame_equal(result, expected) + def test_df_add_2d_array_rowlike_broadcasts(self): # GH#23000 arr = np.arange(6).reshape(3, 2)
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28049
2019-08-21T01:00:31Z
2019-09-20T23:36:14Z
2019-09-20T23:36:14Z
2019-09-20T23:45:26Z
BUG: retain extension dtypes in transpose
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b13aee238efb3..59cc161d12de7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -726,7 +726,16 @@ def transpose(self, *args, **kwargs): new_values = new_values.copy() nv.validate_transpose(tuple(), kwargs) - return self._constructor(new_values, **new_axes).__finalize__(self) + result = self._constructor(new_values, **new_axes).__finalize__(self) + + if self.ndim == 2 and self._is_homogeneous_type and len(self.columns): + if is_extension_array_dtype(self.dtypes.iloc[0]): + # Retain ExtensionArray dtypes through transpose; + # TODO: this can be made cleaner if/when (N, 1) EA are allowed + dtype = self.dtypes.iloc[0] + result = result.astype(dtype) + + return result def swapaxes(self, axis1, axis2, copy=True): """ diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 1ba0930c06334..140cd4086b8bd 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -139,12 +139,10 @@ def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array): ts = pd.Timestamp.now(tz) ser = pd.Series([ts, pd.NaT]) - # FIXME: Can't transpose because that loses the tz dtype on - # the NaT column - obj = tm.box_expected(ser, box, transpose=False) + obj = tm.box_expected(ser, box) expected = pd.Series([True, False], dtype=np.bool_) - expected = tm.box_expected(expected, xbox, transpose=False) + expected = tm.box_expected(expected, xbox) result = obj == ts tm.assert_equal(result, expected) @@ -842,10 +840,8 @@ def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture): other = np.timedelta64("NaT") expected = pd.DatetimeIndex(["NaT"] * 9, tz=tz) - # FIXME: fails with transpose=True due to tz-aware DataFrame - # transpose bug - obj = tm.box_expected(dti, box_with_array, transpose=False) - expected = tm.box_expected(expected, box_with_array, transpose=False) + obj = tm.box_expected(dti, 
box_with_array) + expected = tm.box_expected(expected, box_with_array) result = obj + other tm.assert_equal(result, expected) diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index ed693d873efb8..30992747a11f5 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -755,18 +755,16 @@ def test_pi_sub_isub_offset(self): rng -= pd.offsets.MonthEnd(5) tm.assert_index_equal(rng, expected) - def test_pi_add_offset_n_gt1(self, box_transpose_fail): + def test_pi_add_offset_n_gt1(self, box): # GH#23215 # add offset to PeriodIndex with freq.n > 1 - box, transpose = box_transpose_fail - per = pd.Period("2016-01", freq="2M") pi = pd.PeriodIndex([per]) expected = pd.PeriodIndex(["2016-03"], freq="2M") - pi = tm.box_expected(pi, box, transpose=transpose) - expected = tm.box_expected(expected, box, transpose=transpose) + pi = tm.box_expected(pi, box) + expected = tm.box_expected(expected, box) result = pi + per.freq tm.assert_equal(result, expected) @@ -780,9 +778,8 @@ def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array): pi = pd.PeriodIndex(["2016-01"], freq="2M") expected = pd.PeriodIndex(["2016-04"], freq="2M") - # FIXME: with transposing these tests fail - pi = tm.box_expected(pi, box_with_array, transpose=False) - expected = tm.box_expected(expected, box_with_array, transpose=False) + pi = tm.box_expected(pi, box_with_array) + expected = tm.box_expected(expected, box_with_array) result = pi + to_offset("3M") tm.assert_equal(result, expected) @@ -984,16 +981,15 @@ def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq): with pytest.raises(IncompatibleFrequency, match=msg): rng -= other - def test_parr_add_sub_td64_nat(self, box_transpose_fail): + def test_parr_add_sub_td64_nat(self, box): # GH#23320 special handling for timedelta64("NaT") - box, transpose = box_transpose_fail pi = pd.period_range("1994-04-01", periods=9, freq="19D") other = 
np.timedelta64("NaT") expected = pd.PeriodIndex(["NaT"] * 9, freq="19D") - obj = tm.box_expected(pi, box, transpose=transpose) - expected = tm.box_expected(expected, box, transpose=transpose) + obj = tm.box_expected(pi, box) + expected = tm.box_expected(expected, box) result = obj + other tm.assert_equal(result, expected) @@ -1011,10 +1007,8 @@ def test_parr_add_sub_td64_nat(self, box_transpose_fail): TimedeltaArray._from_sequence(["NaT"] * 9), ], ) - def test_parr_add_sub_tdt64_nat_array(self, box_df_fail, other): - # FIXME: DataFrame fails because when when operating column-wise - # timedelta64 entries become NaT and are treated like datetimes - box = box_df_fail + def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other): + box = box_with_array pi = pd.period_range("1994-04-01", periods=9, freq="19D") expected = pd.PeriodIndex(["NaT"] * 9, freq="19D") diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index f3e61dffb500d..4ca1ee50e97b3 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -891,6 +891,22 @@ def test_no_warning(self, all_arithmetic_operators): class TestTranspose: + @pytest.mark.parametrize( + "ser", + [ + pd.date_range("2016-04-05 04:30", periods=3, tz="UTC"), + pd.period_range("1994", freq="A", periods=3), + pd.period_range("1969", freq="9s", periods=1), + pd.date_range("2016-04-05 04:30", periods=3).astype("category"), + pd.date_range("2016-04-05 04:30", periods=3, tz="UTC").astype("category"), + ], + ) + def test_transpose_retains_extension_dtype(self, ser): + # case with more than 1 column, must have same dtype + df = pd.DataFrame({"a": ser, "b": ser}) + result = df.T + assert (result.dtypes == ser.dtype).all() + def test_transpose_tzaware_1col_single_tz(self): # GH#26825 dti = pd.date_range("2016-04-05 04:30", periods=3, tz="UTC")
I'll have to look through the issues to see what this closes. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28048
2019-08-21T00:58:41Z
2019-12-06T00:14:46Z
null
2020-04-05T17:35:17Z
Issue 20927 fix resolves read_sas error for dates/datetimes greater than 2262-04-11
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 20e2cce1a3dfa..3e7666c8636b9 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -781,6 +781,7 @@ I/O timestamps with ``version="2.0"`` (:issue:`31652`). - Bug in :meth:`read_csv` was raising `TypeError` when `sep=None` was used in combination with `comment` keyword (:issue:`31396`) - Bug in :class:`HDFStore` that caused it to set to ``int64`` the dtype of a ``datetime64`` column when reading a DataFrame in Python 3 from fixed format written in Python 2 (:issue:`31750`) +- :func:`read_sas()` now handles dates and datetimes larger than :attr:`Timestamp.max` returning them as :class:`datetime.datetime` objects (:issue:`20927`) - Bug in :meth:`DataFrame.to_json` where ``Timedelta`` objects would not be serialized correctly with ``date_format="iso"`` (:issue:`28256`) - :func:`read_csv` will raise a ``ValueError`` when the column names passed in `parse_dates` are missing in the Dataframe (:issue:`31251`) - Bug in :meth:`read_excel` where a UTF-8 string with a high surrogate would cause a segmentation violation (:issue:`23809`) diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 2bfcd500ee239..c8f1336bcec60 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -14,12 +14,12 @@ http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm """ from collections import abc -from datetime import datetime +from datetime import datetime, timedelta import struct import numpy as np -from pandas.errors import EmptyDataError +from pandas.errors import EmptyDataError, OutOfBoundsDatetime import pandas as pd @@ -29,6 +29,39 @@ from pandas.io.sas.sasreader import ReaderBase +def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series: + """ + Convert to Timestamp if possible, otherwise to datetime.datetime. 
+ SAS float64 lacks precision for more than ms resolution so the fit + to datetime.datetime is ok. + + Parameters + ---------- + sas_datetimes : {Series, Sequence[float]} + Dates or datetimes in SAS + unit : {str} + "d" if the floats represent dates, "s" for datetimes + + Returns + ------- + Series + Series of datetime64 dtype or datetime.datetime. + """ + try: + return pd.to_datetime(sas_datetimes, unit=unit, origin="1960-01-01") + except OutOfBoundsDatetime: + if unit == "s": + return sas_datetimes.apply( + lambda sas_float: datetime(1960, 1, 1) + timedelta(seconds=sas_float) + ) + elif unit == "d": + return sas_datetimes.apply( + lambda sas_float: datetime(1960, 1, 1) + timedelta(days=sas_float) + ) + else: + raise ValueError("unit must be 'd' or 's'") + + class _subheader_pointer: pass @@ -706,15 +739,10 @@ def _chunk_to_dataframe(self): rslt[name] = self._byte_chunk[jb, :].view(dtype=self.byte_order + "d") rslt[name] = np.asarray(rslt[name], dtype=np.float64) if self.convert_dates: - unit = None if self.column_formats[j] in const.sas_date_formats: - unit = "d" + rslt[name] = _convert_datetimes(rslt[name], "d") elif self.column_formats[j] in const.sas_datetime_formats: - unit = "s" - if unit: - rslt[name] = pd.to_datetime( - rslt[name], unit=unit, origin="1960-01-01" - ) + rslt[name] = _convert_datetimes(rslt[name], "s") jb += 1 elif self._column_types[j] == b"s": rslt[name] = self._string_chunk[js, :] diff --git a/pandas/tests/io/sas/data/max_sas_date.sas7bdat b/pandas/tests/io/sas/data/max_sas_date.sas7bdat new file mode 100644 index 0000000000000..b7838ebdcfeea Binary files /dev/null and b/pandas/tests/io/sas/data/max_sas_date.sas7bdat differ diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index 62e9ac6929c8e..8c14f9de9f61c 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -3,6 +3,7 @@ import os from pathlib import Path +import dateutil.parser import numpy as np import 
pytest @@ -214,3 +215,94 @@ def test_zero_variables(datapath): fname = datapath("io", "sas", "data", "zero_variables.sas7bdat") with pytest.raises(EmptyDataError): pd.read_sas(fname) + + +def round_datetime_to_ms(ts): + if isinstance(ts, datetime): + return ts.replace(microsecond=int(round(ts.microsecond, -3) / 1000) * 1000) + elif isinstance(ts, str): + _ts = dateutil.parser.parse(timestr=ts) + return _ts.replace(microsecond=int(round(_ts.microsecond, -3) / 1000) * 1000) + else: + return ts + + +def test_max_sas_date(datapath): + # GH 20927 + # NB. max datetime in SAS dataset is 31DEC9999:23:59:59.999 + # but this is read as 29DEC9999:23:59:59.998993 by a buggy + # sas7bdat module + fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat") + df = pd.read_sas(fname, encoding="iso-8859-1") + + # SAS likes to left pad strings with spaces - lstrip before comparing + df = df.applymap(lambda x: x.lstrip() if isinstance(x, str) else x) + # GH 19732: Timestamps imported from sas will incur floating point errors + try: + df["dt_as_dt"] = df["dt_as_dt"].dt.round("us") + except pd._libs.tslibs.np_datetime.OutOfBoundsDatetime: + df = df.applymap(round_datetime_to_ms) + except AttributeError: + df["dt_as_dt"] = df["dt_as_dt"].apply(round_datetime_to_ms) + # if there are any date/times > pandas.Timestamp.max then ALL in that chunk + # are returned as datetime.datetime + expected = pd.DataFrame( + { + "text": ["max", "normal"], + "dt_as_float": [253717747199.999, 1880323199.999], + "dt_as_dt": [ + datetime(9999, 12, 29, 23, 59, 59, 999000), + datetime(2019, 8, 1, 23, 59, 59, 999000), + ], + "date_as_float": [2936547.0, 21762.0], + "date_as_date": [datetime(9999, 12, 29), datetime(2019, 8, 1)], + }, + columns=["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"], + ) + tm.assert_frame_equal(df, expected) + + +def test_max_sas_date_iterator(datapath): + # GH 20927 + # when called as an iterator, only those chunks with a date > pd.Timestamp.max + # are returned 
as datetime.datetime, if this happens that whole chunk is returned + # as datetime.datetime + col_order = ["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"] + fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat") + results = [] + for df in pd.read_sas(fname, encoding="iso-8859-1", chunksize=1): + # SAS likes to left pad strings with spaces - lstrip before comparing + df = df.applymap(lambda x: x.lstrip() if isinstance(x, str) else x) + # GH 19732: Timestamps imported from sas will incur floating point errors + try: + df["dt_as_dt"] = df["dt_as_dt"].dt.round("us") + except pd._libs.tslibs.np_datetime.OutOfBoundsDatetime: + df = df.applymap(round_datetime_to_ms) + except AttributeError: + df["dt_as_dt"] = df["dt_as_dt"].apply(round_datetime_to_ms) + df.reset_index(inplace=True, drop=True) + results.append(df) + expected = [ + pd.DataFrame( + { + "text": ["max"], + "dt_as_float": [253717747199.999], + "dt_as_dt": [datetime(9999, 12, 29, 23, 59, 59, 999000)], + "date_as_float": [2936547.0], + "date_as_date": [datetime(9999, 12, 29)], + }, + columns=col_order, + ), + pd.DataFrame( + { + "text": ["normal"], + "dt_as_float": [1880323199.999], + "dt_as_dt": [np.datetime64("2019-08-01 23:59:59.999")], + "date_as_float": [21762.0], + "date_as_date": [np.datetime64("2019-08-01")], + }, + columns=col_order, + ), + ] + for result, expected in zip(results, expected): + tm.assert_frame_equal(result, expected)
- [x] closes #20927 - [x] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28047
2019-08-20T23:07:48Z
2020-05-25T21:47:53Z
2020-05-25T21:47:53Z
2023-10-27T16:40:11Z
Backport PR #27991 on branch 0.25.x (DataFrame html repr: also follow min_rows setting)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 44cc38662d976..9b3e7d24d9901 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -106,6 +106,7 @@ I/O ^^^ - Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`) - Better error message when a negative header is passed in :func:`pandas.read_csv` (:issue:`27779`) +- Follow the ``min_rows`` display option (introduced in v0.25.0) correctly in the html repr in the notebook (:issue:`27991`). - Plotting diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1f85b31be69b2..a69abaeacd18b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -673,15 +673,19 @@ def _repr_html_(self): if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") + min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") - return self.to_html( + formatter = fmt.DataFrameFormatter( + self, max_rows=max_rows, + min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, - notebook=True, ) + formatter.to_html(notebook=True) + return formatter.buf.getvalue() else: return None diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index ad47f714c9550..83ef2bebd1b69 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -422,28 +422,35 @@ def test_repr_min_rows(self): # default setting no truncation even if above min_rows assert ".." not in repr(df) + assert ".." not in df._repr_html_() df = pd.DataFrame({"a": range(61)}) # default of max_rows 60 triggers truncation if above assert ".." in repr(df) + assert ".." in df._repr_html_() with option_context("display.max_rows", 10, "display.min_rows", 4): # truncated after first two rows assert ".." in repr(df) assert "2 " not in repr(df) + assert "..." 
in df._repr_html_() + assert "<td>2</td>" not in df._repr_html_() with option_context("display.max_rows", 12, "display.min_rows", None): # when set to None, follow value of max_rows assert "5 5" in repr(df) + assert "<td>5</td>" in df._repr_html_() with option_context("display.max_rows", 10, "display.min_rows", 12): # when set value higher as max_rows, use the minimum assert "5 5" not in repr(df) + assert "<td>5</td>" not in df._repr_html_() with option_context("display.max_rows", None, "display.min_rows", 12): # max_rows of None -> never truncate assert ".." not in repr(df) + assert ".." not in df._repr_html_() def test_str_max_colwidth(self): # GH 7856
Backport PR #27991: DataFrame html repr: also follow min_rows setting
https://api.github.com/repos/pandas-dev/pandas/pulls/28044
2019-08-20T19:24:57Z
2019-08-21T17:48:47Z
2019-08-21T17:48:46Z
2019-08-21T17:48:47Z
DOC: Add punctuation to IntervalArray docstrings
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 9cb2721b33634..7a14d6f1b619a 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -361,7 +361,7 @@ def from_arrays(cls, left, right, closed="right", copy=False, dtype=None): _interval_shared_docs[ "from_tuples" ] = """ - Construct an %(klass)s from an array-like of tuples + Construct an %(klass)s from an array-like of tuples. Parameters ---------- @@ -854,7 +854,7 @@ def _format_space(self): def left(self): """ Return the left endpoints of each Interval in the IntervalArray as - an Index + an Index. """ return self._left @@ -862,7 +862,7 @@ def left(self): def right(self): """ Return the right endpoints of each Interval in the IntervalArray as - an Index + an Index. """ return self._right @@ -870,7 +870,7 @@ def right(self): def closed(self): """ Whether the intervals are closed on the left-side, right-side, both or - neither + neither. """ return self._closed @@ -878,7 +878,7 @@ def closed(self): "set_closed" ] = """ Return an %(klass)s identical to the current one, but closed on the - specified side + specified side. .. versionadded:: 0.24.0 @@ -917,7 +917,7 @@ def set_closed(self, closed): def length(self): """ Return an Index with entries denoting the length of each Interval in - the IntervalArray + the IntervalArray. """ try: return self.right - self.left @@ -945,7 +945,7 @@ def mid(self): ] = """ Return True if the %(klass)s is non-overlapping (no Intervals share points) and is either monotonic increasing or monotonic decreasing, - else False + else False. """ # https://github.com/python/mypy/issues/1362 # Mypy does not support decorated properties @@ -995,7 +995,7 @@ def __array__(self, dtype=None): _interval_shared_docs[ "to_tuples" ] = """ - Return an %(return_type)s of tuples of the form (left, right) + Return an %(return_type)s of tuples of the form (left, right). Parameters ----------
- [ ] xref #27979 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Added missing period to IntervalArray docstrings as suggested by @datapythonista for the following attributes and parameters: ``` pandas.arrays.IntervalArray.left pandas.arrays.IntervalArray.right pandas.arrays.IntervalArray.closed pandas.arrays.IntervalArray.mid pandas.arrays.IntervalArray.length pandas.arrays.IntervalArray.is_non_overlapping_monotonic pandas.arrays.IntervalArray.from_tuples pandas.arrays.IntervalArray.set_closed pandas.arrays.IntervalArray.to_tuples ```
https://api.github.com/repos/pandas-dev/pandas/pulls/28043
2019-08-20T19:14:21Z
2019-08-21T06:28:33Z
2019-08-21T06:28:33Z
2019-08-21T06:28:42Z
Backport PR #26419 on branch 0.25.x (Fix GroupBy nth Handling with Observed=False)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index ac3d645684fda..426822c19fd6f 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -122,6 +122,7 @@ Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`) +- Bug in :meth:`pandas.core.groupby.GroupBy.nth` where ``observed=False`` was being ignored for Categorical groupers (:issue:`26385`) - Bug in windowing over read-only arrays (:issue:`27766`) - - diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 9aba9723e0546..b852513e454a2 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1771,7 +1771,11 @@ def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFra if not self.as_index: return out - out.index = self.grouper.result_index[ids[mask]] + result_index = self.grouper.result_index + out.index = result_index[ids[mask]] + + if not self.observed and isinstance(result_index, CategoricalIndex): + out = out.reindex(result_index) return out.sort_index() if self.sort else out diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 99cc4cf0ffbd1..9750a36d9350b 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -434,6 +434,21 @@ def test_observed_groups_with_nan(observed): tm.assert_dict_equal(result, expected) +def test_observed_nth(): + # GH 26385 + cat = pd.Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"]) + ser = pd.Series([1, 2, 3]) + df = pd.DataFrame({"cat": cat, "ser": ser}) + + result = df.groupby("cat", observed=False)["ser"].nth(0) + + index = pd.Categorical(["a", "b", "c"], categories=["a", "b", "c"]) + expected = pd.Series([1, np.nan, np.nan], index=index, name="ser") + expected.index.name = 
"cat" + + tm.assert_series_equal(result, expected) + + def test_dataframe_categorical_with_nan(observed): # GH 21151 s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
Backport PR #26419: Fix GroupBy nth Handling with Observed=False
https://api.github.com/repos/pandas-dev/pandas/pulls/28042
2019-08-20T19:02:23Z
2019-08-20T21:13:21Z
2019-08-20T21:13:21Z
2019-08-20T21:13:21Z
Backport PR #27664: BUG: added a check for if obj is instance of type …
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index ac3d645684fda..124a2ced534c4 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -91,7 +91,7 @@ Indexing Missing ^^^^^^^ -- +- Bug in :func:`pandas.isnull` or :func:`pandas.isna` when the input is a type e.g. `type(pandas.Series())` (:issue:`27482`) - - diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index bea73d72b91c9..37f790e41ad04 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -131,6 +131,8 @@ def _isna_new(obj): # hack (for now) because MI registers as ndarray elif isinstance(obj, ABCMultiIndex): raise NotImplementedError("isna is not defined for MultiIndex") + elif isinstance(obj, type): + return False elif isinstance( obj, ( @@ -169,6 +171,8 @@ def _isna_old(obj): # hack (for now) because MI registers as ndarray elif isinstance(obj, ABCMultiIndex): raise NotImplementedError("isna is not defined for MultiIndex") + elif isinstance(obj, type): + return False elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)): return _isna_ndarraylike_old(obj) elif isinstance(obj, ABCGeneric): diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index a688dec50bc95..bbc485ecf94f2 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -86,6 +86,10 @@ def test_isna_isnull(self, isna_f): assert not isna_f(np.inf) assert not isna_f(-np.inf) + # type + assert not isna_f(type(pd.Series())) + assert not isna_f(type(pd.DataFrame())) + # series for s in [ tm.makeFloatSeries(),
Manual backport of #27664
https://api.github.com/repos/pandas-dev/pandas/pulls/28041
2019-08-20T18:12:43Z
2019-08-20T19:16:53Z
2019-08-20T19:16:53Z
2019-08-20T19:16:57Z
Backport PR #28024 on branch 0.25.x (BUG: rfloordiv with fill_value, closes#27464)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index ac3d645684fda..a82df806a9de4 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -55,7 +55,7 @@ Numeric - Bug in :meth:`Series.interpolate` when using a timezone aware :class:`DatetimeIndex` (:issue:`27548`) - Bug when printing negative floating point complex numbers would raise an ``IndexError`` (:issue:`27484`) - Bug where :class:`DataFrame` arithmetic operators such as :meth:`DataFrame.mul` with a :class:`Series` with axis=1 would raise an ``AttributeError`` on :class:`DataFrame` larger than the minimum threshold to invoke numexpr (:issue:`27636`) -- +- Bug in :class:`DataFrame` arithmetic where missing values in results were incorrectly masked with ``NaN`` instead of ``Inf`` (:issue:`27464`) Conversion ^^^^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0570b9af2d256..1f85b31be69b2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -111,6 +111,7 @@ sanitize_index, to_arrays, ) +from pandas.core.ops.missing import dispatch_fill_zeros from pandas.core.series import Series from pandas.io.formats import console, format as fmt @@ -5365,7 +5366,9 @@ def _arith_op(left, right): # iterate over columns return ops.dispatch_to_series(this, other, _arith_op) else: - result = _arith_op(this.values, other.values) + with np.errstate(all="ignore"): + result = _arith_op(this.values, other.values) + result = dispatch_fill_zeros(func, this.values, other.values, result) return self._constructor( result, index=new_index, columns=new_columns, copy=False ) diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 2b23790e4ccd3..d686d9f90a5a4 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -1227,3 +1227,36 @@ def test_addsub_arithmetic(self, dtype, delta): tm.assert_index_equal(index + index, 2 * index) tm.assert_index_equal(index - 
index, 0 * index) assert not (index - index).empty + + +def test_fill_value_inf_masking(): + # GH #27464 make sure we mask 0/1 with Inf and not NaN + df = pd.DataFrame({"A": [0, 1, 2], "B": [1.1, None, 1.1]}) + + other = pd.DataFrame({"A": [1.1, 1.2, 1.3]}, index=[0, 2, 3]) + + result = df.rfloordiv(other, fill_value=1) + + expected = pd.DataFrame( + {"A": [np.inf, 1.0, 0.0, 1.0], "B": [0.0, np.nan, 0.0, np.nan]} + ) + tm.assert_frame_equal(result, expected) + + +def test_dataframe_div_silenced(): + # GH#26793 + pdf1 = pd.DataFrame( + { + "A": np.arange(10), + "B": [np.nan, 1, 2, 3, 4] * 2, + "C": [np.nan] * 10, + "D": np.arange(10), + }, + index=list("abcdefghij"), + columns=list("ABCD"), + ) + pdf2 = pd.DataFrame( + np.random.randn(10, 4), index=list("abcdefghjk"), columns=list("ABCX") + ) + with tm.assert_produces_warning(None): + pdf1.div(pdf2, fill_value=0)
Backport PR #28024: BUG: rfloordiv with fill_value, closes#27464
https://api.github.com/repos/pandas-dev/pandas/pulls/28040
2019-08-20T17:45:16Z
2019-08-20T19:17:16Z
2019-08-20T19:17:16Z
2019-08-20T19:17:16Z
REF: boilerplate for ops internal consistency
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 73d1db9bda8ed..817972b3356a2 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -8,7 +8,7 @@ from pandas._config import get_option -from pandas._libs import algos as libalgos, hashtable as htable, lib +from pandas._libs import algos as libalgos, hashtable as htable from pandas.compat.numpy import function as nv from pandas.util._decorators import ( Appender, @@ -39,7 +39,7 @@ needs_i8_conversion, ) from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries +from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import isna, notna @@ -52,6 +52,7 @@ import pandas.core.common as com from pandas.core.construction import array, extract_array, sanitize_array from pandas.core.missing import interpolate_2d +from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.sorting import nargsort from pandas.io.formats import console @@ -74,16 +75,14 @@ def _cat_compare_op(op): opname = "__{op}__".format(op=op.__name__) + @unpack_zerodim_and_defer(opname) def f(self, other): # On python2, you can usually compare any type to any type, and # Categoricals can be seen as a custom type, but having different # results depending whether categories are the same or not is kind of # insane, so be a bit stricter here and use the python3 idea of # comparing only things of equal type. - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - return NotImplemented - other = lib.item_from_zerodim(other) if is_list_like(other) and len(other) != len(self): # TODO: Could this fail if the categories are listlike objects? 
raise ValueError("Lengths must match.") diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 287ff9d618501..e52bc17fcc319 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -33,12 +33,7 @@ is_unsigned_integer_dtype, pandas_dtype, ) -from pandas.core.dtypes.generic import ( - ABCDataFrame, - ABCIndexClass, - ABCPeriodArray, - ABCSeries, -) +from pandas.core.dtypes.generic import ABCIndexClass, ABCPeriodArray, ABCSeries from pandas.core.dtypes.inference import is_array_like from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna @@ -46,6 +41,7 @@ from pandas.core import missing, nanops from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts import pandas.core.common as com +from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.invalid import make_invalid_op from pandas.tseries import frequencies @@ -1194,13 +1190,11 @@ def _time_shift(self, periods, freq=None): # to be passed explicitly. 
return self._generate_range(start=start, end=end, periods=None, freq=self.freq) + @unpack_zerodim_and_defer("__add__") def __add__(self, other): - other = lib.item_from_zerodim(other) - if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)): - return NotImplemented # scalar others - elif other is NaT: + if other is NaT: result = self._add_nat() elif isinstance(other, (Tick, timedelta, np.timedelta64)): result = self._add_delta(other) @@ -1248,13 +1242,11 @@ def __radd__(self, other): # alias for __add__ return self.__add__(other) + @unpack_zerodim_and_defer("__sub__") def __sub__(self, other): - other = lib.item_from_zerodim(other) - if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)): - return NotImplemented # scalar others - elif other is NaT: + if other is NaT: result = self._sub_nat() elif isinstance(other, (Tick, timedelta, np.timedelta64)): result = self._add_delta(-other) @@ -1343,11 +1335,11 @@ def __rsub__(self, other): return -(self - other) # FIXME: DTA/TDA/PA inplace methods should actually be inplace, GH#24115 - def __iadd__(self, other): + def __iadd__(self, other): # type: ignore # alias for __add__ return self.__add__(other) - def __isub__(self, other): + def __isub__(self, other): # type: ignore # alias for __sub__ return self.__sub__(other) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 7cd103d12fa8a..8e3c727a14c99 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -40,12 +40,7 @@ pandas_dtype, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype -from pandas.core.dtypes.generic import ( - ABCDataFrame, - ABCIndexClass, - ABCPandasArray, - ABCSeries, -) +from pandas.core.dtypes.generic import ABCIndexClass, ABCPandasArray, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core import ops @@ -53,6 +48,7 @@ from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import 
pandas.core.common as com +from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.invalid import invalid_comparison from pandas.tseries.frequencies import get_period_alias, to_offset @@ -157,11 +153,8 @@ def _dt_array_cmp(cls, op): opname = "__{name}__".format(name=op.__name__) nat_result = opname == "__ne__" + @unpack_zerodim_and_defer(opname) def wrapper(self, other): - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - return NotImplemented - - other = lib.item_from_zerodim(other) if isinstance(other, (datetime, np.datetime64, str)): if isinstance(other, (datetime, np.datetime64)): diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 41d8bffd8c131..e167e556b244a 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -21,12 +21,12 @@ is_scalar, ) from pandas.core.dtypes.dtypes import register_extension_dtype -from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna, notna from pandas.core import nanops, ops from pandas.core.algorithms import take from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin +from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.tools.numeric import to_numeric @@ -602,13 +602,8 @@ def _values_for_argsort(self) -> np.ndarray: def _create_comparison_method(cls, op): op_name = op.__name__ + @unpack_zerodim_and_defer(op.__name__) def cmp_method(self, other): - - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - # Rely on pandas to unbox and dispatch to us. 
- return NotImplemented - - other = lib.item_from_zerodim(other) mask = None if isinstance(other, IntegerArray): @@ -697,15 +692,14 @@ def _maybe_mask_result(self, result, mask, other, op_name): def _create_arithmetic_method(cls, op): op_name = op.__name__ + @unpack_zerodim_and_defer(op.__name__) def integer_arithmetic_method(self, other): - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - # Rely on pandas to unbox and dispatch to us. - return NotImplemented - - other = lib.item_from_zerodim(other) mask = None + if getattr(other, "ndim", 0) > 1: + raise NotImplementedError("can only perform ops with 1-d structures") + if isinstance(other, IntegerArray): other, mask = other._data, other._mask diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 78cc54db4b1b8..fdf4059fad569 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -4,7 +4,6 @@ import numpy as np -from pandas._libs import lib from pandas._libs.tslibs import ( NaT, NaTType, @@ -35,7 +34,6 @@ ) from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ( - ABCDataFrame, ABCIndexClass, ABCPeriodArray, ABCPeriodIndex, @@ -46,6 +44,7 @@ import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com +from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.tseries import frequencies from pandas.tseries.offsets import DateOffset, Tick, _delta_to_tick @@ -69,13 +68,10 @@ def _period_array_cmp(cls, op): opname = "__{name}__".format(name=op.__name__) nat_result = opname == "__ne__" + @unpack_zerodim_and_defer(opname) def wrapper(self, other): ordinal_op = getattr(self.asi8, opname) - other = lib.item_from_zerodim(other) - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - return NotImplemented - if is_list_like(other) and len(other) != len(self): raise ValueError("Lengths must match") diff --git 
a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 14024401ea110..943dea4252499 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -34,12 +34,7 @@ is_string_dtype, pandas_dtype, ) -from pandas.core.dtypes.generic import ( - ABCDataFrame, - ABCIndexClass, - ABCSeries, - ABCSparseArray, -) +from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries, ABCSparseArray from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna import pandas.core.algorithms as algos @@ -49,6 +44,7 @@ from pandas.core.construction import sanitize_array from pandas.core.missing import interpolate_2d import pandas.core.ops as ops +from pandas.core.ops.common import unpack_zerodim_and_defer import pandas.io.formats.printing as printing @@ -1410,12 +1406,8 @@ def sparse_unary_method(self): def _create_arithmetic_method(cls, op): op_name = op.__name__ + @unpack_zerodim_and_defer(op_name) def sparse_arithmetic_method(self, other): - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - # Rely on pandas to dispatch to us. - return NotImplemented - - other = lib.item_from_zerodim(other) if isinstance(other, SparseArray): return _sparse_array_op(self, other, op, op_name) @@ -1463,12 +1455,9 @@ def _create_comparison_method(cls, op): if op_name in {"and_", "or_"}: op_name = op_name[:-1] + @unpack_zerodim_and_defer(op_name) def cmp_method(self, other): - if isinstance(other, (ABCSeries, ABCIndexClass)): - # Rely on pandas to unbox and dispatch to us. 
- return NotImplemented - if not is_scalar(other) and not isinstance(other, type(self)): # convert list-like to ndarray other = np.asarray(other) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 21e07b5101a64..816beb758dd33 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -45,6 +45,7 @@ from pandas.core import nanops from pandas.core.algorithms import checked_add_with_arr import pandas.core.common as com +from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.invalid import invalid_comparison from pandas.tseries.frequencies import to_offset @@ -82,10 +83,8 @@ def _td_array_cmp(cls, op): opname = "__{name}__".format(name=op.__name__) nat_result = opname == "__ne__" + @unpack_zerodim_and_defer(opname) def wrapper(self, other): - other = lib.item_from_zerodim(other) - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - return NotImplemented if _is_convertible_to_td(other) or other is NaT: try: diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 962ba8cc00557..f5cb435b8c1c2 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -21,7 +21,7 @@ is_scalar, is_timedelta64_dtype, ) -from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries, ABCTimedeltaIndex +from pandas.core.dtypes.generic import ABCTimedeltaIndex from pandas.core import ops import pandas.core.common as com @@ -29,6 +29,7 @@ import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.indexes.numeric import Int64Index +from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.io.formats.printing import pprint_thing @@ -734,9 +735,8 @@ def __getitem__(self, key): # fall back to Int64Index return super().__getitem__(key) + @unpack_zerodim_and_defer("__floordiv__") def __floordiv__(self, other): - if isinstance(other, (ABCSeries, ABCDataFrame)): - return 
NotImplemented if is_integer(other) and other != 0: if len(self) == 0 or self.start % other == 0 and self.step % other == 0: @@ -772,10 +772,9 @@ def _make_evaluate_binop(op, step=False): if False, use the existing step """ + @unpack_zerodim_and_defer(op.__name__) def _evaluate_numeric_binop(self, other): - if isinstance(other, (ABCSeries, ABCDataFrame)): - return NotImplemented - elif isinstance(other, ABCTimedeltaIndex): + if isinstance(other, ABCTimedeltaIndex): # Defer to TimedeltaIndex implementation return NotImplemented elif isinstance(other, (timedelta, np.timedelta64)): diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 398fa9b0c1fc0..f7a1258894b89 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -29,6 +29,7 @@ logical_op, ) from pandas.core.ops.array_ops import comp_method_OBJECT_ARRAY # noqa:F401 +from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.dispatch import maybe_dispatch_ufunc_to_dunder_op # noqa:F401 from pandas.core.ops.dispatch import should_series_dispatch from pandas.core.ops.docstrings import ( @@ -489,9 +490,8 @@ def _arith_method_SERIES(cls, op, special): op_name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(op_name) + @unpack_zerodim_and_defer(op_name) def wrapper(left, right): - if isinstance(right, ABCDataFrame): - return NotImplemented left, right = _align_method_SERIES(left, right) res_name = get_op_result_name(left, right) @@ -512,14 +512,11 @@ def _comp_method_SERIES(cls, op, special): """ op_name = _get_op_name(op, special) + @unpack_zerodim_and_defer(op_name) def wrapper(self, other): res_name = get_op_result_name(self, other) - if isinstance(other, ABCDataFrame): # pragma: no cover - # Defer to DataFrame implementation; fail early - return NotImplemented - if isinstance(other, ABCSeries) and not self._indexed_same(other): raise ValueError("Can only compare identically-labeled Series objects") @@ -541,14 +538,11 @@ def 
_bool_method_SERIES(cls, op, special): """ op_name = _get_op_name(op, special) + @unpack_zerodim_and_defer(op_name) def wrapper(self, other): self, other = _align_method_SERIES(self, other, align_asobject=True) res_name = get_op_result_name(self, other) - if isinstance(other, ABCDataFrame): - # Defer to DataFrame implementation; fail early - return NotImplemented - lvalues = extract_array(self, extract_numpy=True) rvalues = extract_array(other, extract_numpy=True) diff --git a/pandas/core/ops/common.py b/pandas/core/ops/common.py new file mode 100644 index 0000000000000..f4b16cf4a0cf2 --- /dev/null +++ b/pandas/core/ops/common.py @@ -0,0 +1,66 @@ +""" +Boilerplate functions used in defining binary operations. +""" +from functools import wraps + +from pandas._libs.lib import item_from_zerodim + +from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries + + +def unpack_zerodim_and_defer(name: str): + """ + Boilerplate for pandas conventions in arithmetic and comparison methods. + + Parameters + ---------- + name : str + + Returns + ------- + decorator + """ + + def wrapper(method): + return _unpack_zerodim_and_defer(method, name) + + return wrapper + + +def _unpack_zerodim_and_defer(method, name: str): + """ + Boilerplate for pandas conventions in arithmetic and comparison methods. + + Ensure method returns NotImplemented when operating against "senior" + classes. Ensure zero-dimensional ndarrays are always unpacked. 
+ + Parameters + ---------- + method : binary method + name : str + + Returns + ------- + method + """ + + is_cmp = name.strip("__") in {"eq", "ne", "lt", "le", "gt", "ge"} + + @wraps(method) + def new_method(self, other): + + if is_cmp and isinstance(self, ABCIndexClass) and isinstance(other, ABCSeries): + # For comparison ops, Index does *not* defer to Series + pass + else: + for cls in [ABCDataFrame, ABCSeries, ABCIndexClass]: + if isinstance(self, cls): + break + if isinstance(other, cls): + return NotImplemented + + other = item_from_zerodim(other) + + return method(self, other) + + return new_method diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 4d3d6e2df35db..1ba0930c06334 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -1029,6 +1029,7 @@ def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array): [ "unsupported operand type", "cannot (add|subtract)", + "cannot use operands with types", "ufunc '?(add|subtract)'? cannot use operands with types", ] )
Progress towards #23853, with more progress available pending resolution of #27911.
https://api.github.com/repos/pandas-dev/pandas/pulls/28037
2019-08-20T15:12:34Z
2019-11-14T16:31:55Z
2019-11-14T16:31:55Z
2019-11-14T16:36:00Z
CLN: small ops optimizations
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 97567192aa17a..04ddc9a53ad04 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5298,12 +5298,19 @@ def _combine_frame(self, other, func, fill_value=None, level=None): this, other = self.align(other, join="outer", level=level, copy=False) new_index, new_columns = this.index, this.columns - def _arith_op(left, right): - # for the mixed_type case where we iterate over columns, - # _arith_op(left, right) is equivalent to - # left._binop(right, func, fill_value=fill_value) - left, right = ops.fill_binop(left, right, fill_value) - return func(left, right) + if fill_value is None: + # since _arith_op may be called in a loop, avoid function call + # overhead if possible by doing this check once + _arith_op = func + + else: + + def _arith_op(left, right): + # for the mixed_type case where we iterate over columns, + # _arith_op(left, right) is equivalent to + # left._binop(right, func, fill_value=fill_value) + left, right = ops.fill_binop(left, right, fill_value) + return func(left, right) if ops.should_series_dispatch(this, other, func): # iterate over columns @@ -5318,7 +5325,7 @@ def _arith_op(left, right): def _combine_match_index(self, other, func, level=None): left, right = self.align(other, join="outer", axis=0, level=level, copy=False) - assert left.index.equals(right.index) + # at this point we have `left.index.equals(right.index)` if left._is_mixed_type or right._is_mixed_type: # operate column-wise; avoid costly object-casting in `.values` @@ -5331,14 +5338,13 @@ def _combine_match_index(self, other, func, level=None): new_data, index=left.index, columns=self.columns, copy=False ) - def _combine_match_columns(self, other, func, level=None): - assert isinstance(other, Series) + def _combine_match_columns(self, other: Series, func, level=None): left, right = self.align(other, join="outer", axis=1, level=level, copy=False) - assert left.columns.equals(right.index) + # at this point we have 
`left.columns.equals(right.index)` return ops.dispatch_to_series(left, right, func, axis="columns") def _combine_const(self, other, func): - assert lib.is_scalar(other) or np.ndim(other) == 0 + # scalar other or np.ndim(other) == 0 return ops.dispatch_to_series(self, other, func) def combine(self, other, func, fill_value=None, overwrite=True): diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 7e03b9544ee72..86cd6e878cde6 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -169,7 +169,7 @@ def maybe_upcast_for_op(obj, shape: Tuple[int, ...]): # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') return Timedelta(obj) - elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj): + elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj.dtype): # GH#22390 Unfortunately we need to special-case right-hand # timedelta64 dtypes because numpy casts integer dtypes to # timedelta64 when operating with timedelta64 @@ -415,7 +415,7 @@ def should_extension_dispatch(left: ABCSeries, right: Any) -> bool: ): return True - if is_extension_array_dtype(right) and not is_scalar(right): + if not is_scalar(right) and is_extension_array_dtype(right): # GH#22378 disallow scalar to exclude e.g. "category", "Int64" return True @@ -755,7 +755,7 @@ def na_op(x, y): assert not isinstance(y, (list, ABCSeries, ABCIndexClass)) if isinstance(y, np.ndarray): # bool-bool dtype operations should be OK, should not get here - assert not (is_bool_dtype(x) and is_bool_dtype(y)) + assert not (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)) x = ensure_object(x) y = ensure_object(y) result = libops.vec_binop(x, y, op) @@ -804,7 +804,7 @@ def wrapper(self, other): else: # scalars, list, tuple, np.array - is_other_int_dtype = is_integer_dtype(np.asarray(other)) + is_other_int_dtype = is_integer_dtype(np.asarray(other).dtype) if is_list_like(other) and not isinstance(other, np.ndarray): # TODO: Can we do this before the is_integer_dtype check? 
# could the is_integer_dtype check be checking the wrong @@ -988,10 +988,10 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): self, other, pass_op, fill_value=fill_value, axis=axis, level=level ) else: + # in this case we always have `np.ndim(other) == 0` if fill_value is not None: self = self.fillna(fill_value) - assert np.ndim(other) == 0 return self._combine_const(other, op) f.__name__ = op_name @@ -1032,7 +1032,7 @@ def f(self, other, axis=default_axis, level=None): self, other, na_op, fill_value=None, axis=axis, level=level ) else: - assert np.ndim(other) == 0, other + # in this case we always have `np.ndim(other) == 0` return self._combine_const(other, na_op) f.__name__ = op_name diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 523ba5d42a69c..f5f6d77676f1f 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -11,7 +11,7 @@ find_common_type, maybe_upcast_putmask, ) -from pandas.core.dtypes.common import is_object_dtype, is_period_dtype, is_scalar +from pandas.core.dtypes.common import is_object_dtype, is_scalar from pandas.core.dtypes.generic import ABCIndex, ABCSeries from pandas.core.dtypes.missing import notna @@ -57,9 +57,9 @@ def masked_arith_op(x, y, op): dtype = find_common_type([x.dtype, y.dtype]) result = np.empty(x.size, dtype=dtype) - # PeriodIndex.ravel() returns int64 dtype, so we have - # to work around that case. See GH#19956 - yrav = y if is_period_dtype(y) else y.ravel() + # NB: ravel() is only safe since y is ndarray; for e.g. PeriodIndex + # we would get int64 dtype, see GH#19956 + yrav = y.ravel() mask = notna(xrav) & notna(yrav) if yrav.shape != mask.shape: @@ -82,9 +82,9 @@ def masked_arith_op(x, y, op): mask = notna(xrav) # 1 ** np.nan is 1. So we have to unmask those. 
- if op == pow: + if op is pow: mask = np.where(x == 1, False, mask) - elif op == rpow: + elif op is rpow: mask = np.where(y == 1, False, mask) if mask.any(): diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py index 01bc345a40b83..45fa6a2830af6 100644 --- a/pandas/core/ops/missing.py +++ b/pandas/core/ops/missing.py @@ -40,7 +40,7 @@ def fill_zeros(result, x, y, name, fill): Mask the nan's from x. """ - if fill is None or is_float_dtype(result): + if fill is None or is_float_dtype(result.dtype): return result if name.startswith(("r", "__r")): @@ -55,7 +55,7 @@ def fill_zeros(result, x, y, name, fill): if is_scalar_type: y = np.array(y) - if is_integer_dtype(y): + if is_integer_dtype(y.dtype): if (y == 0).any(): diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index f5add426297a7..8fe6850c84b8b 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -569,13 +569,13 @@ def _combine_frame(self, other, func, fill_value=None, level=None): ).__finalize__(self) def _combine_match_index(self, other, func, level=None): - new_data = {} if level is not None: raise NotImplementedError("'level' argument is not supported") this, other = self.align(other, join="outer", axis=0, level=level, copy=False) + new_data = {} for col, series in this.items(): new_data[col] = func(series.values, other.values)
@jorisvandenbossche you've mentioned some ops optimizations; any suggestions for things to add here?
https://api.github.com/repos/pandas-dev/pandas/pulls/28036
2019-08-20T14:42:49Z
2019-08-26T23:39:13Z
2019-08-26T23:39:13Z
2019-08-27T00:19:49Z
Backport PR #27481 on branch 0.25.x (Correctly re-instate Matplotlib converters)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index dffe4ceb4a218..ac3d645684fda 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -112,6 +112,9 @@ Plotting ^^^^^^^^ - Added a pandas_plotting_backends entrypoint group for registering plot backends. See :ref:`extending.plotting-backends` for more (:issue:`26747`). +- Fixed the re-instatement of Matplotlib datetime converters after calling + `pandas.plotting.deregister_matplotlib_converters()` (:issue:`27481`). +- - Fix compatibility issue with matplotlib when passing a pandas ``Index`` to a plot call (:issue:`27775`). - diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 15648d59c8f98..893854ab26e37 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -64,11 +64,12 @@ def register(explicit=True): pairs = get_pairs() for type_, cls in pairs: - converter = cls() - if type_ in units.registry: + # Cache previous converter if present + if type_ in units.registry and not isinstance(units.registry[type_], cls): previous = units.registry[type_] _mpl_units[type_] = previous - units.registry[type_] = converter + # Replace with pandas converter + units.registry[type_] = cls() def deregister(): diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index 35d12706f0590..7001264c41c05 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -40,6 +40,21 @@ def test_initial_warning(): assert "Using an implicitly" in out +def test_registry_mpl_resets(): + # Check that Matplotlib converters are properly reset (see issue #27481) + code = ( + "import matplotlib.units as units; " + "import matplotlib.dates as mdates; " + "n_conv = len(units.registry); " + "import pandas as pd; " + "pd.plotting.register_matplotlib_converters(); " + "pd.plotting.deregister_matplotlib_converters(); " + 
"assert len(units.registry) == n_conv" + ) + call = [sys.executable, "-c", code] + subprocess.check_output(call) + + def test_timtetonum_accepts_unicode(): assert converter.time2num("00:01") == converter.time2num("00:01")
Backport PR #27481: Correctly re-instate Matplotlib converters
https://api.github.com/repos/pandas-dev/pandas/pulls/28035
2019-08-20T14:27:05Z
2019-08-20T16:18:41Z
2019-08-20T16:18:41Z
2019-08-20T16:18:42Z
Add support to names keyword in Index
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 4e098b2f8be9b..69c89635cc828 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -176,10 +176,12 @@ class Index(IndexOpsMixin, PandasObject): Otherwise, an error will be raised. copy : bool Make a copy of input ndarray - name : object + name : object, optional Name to be stored in the index tupleize_cols : bool (default: True) When True, attempt to create a MultiIndex if possible + names : tuple of objects, optional + Names to be stored in the index (only accepts tuple of length 1) See Also -------- @@ -261,10 +263,18 @@ def __new__( name=None, fastpath=None, tupleize_cols=True, + names=None, **kwargs ): - if name is None and hasattr(data, "name"): + if names is not None: + if name is not None: + raise TypeError("Using name and names is unsupported") + elif len(names) > 1: + raise TypeError("names must be list-like of size 1") + # infer name from names when MultiIndex cannot be created + name = names[0] + elif hasattr(data, "name") and name is None: name = data.name if fastpath is not None: @@ -492,9 +502,7 @@ def __new__( # 10697 from .multi import MultiIndex - return MultiIndex.from_tuples( - data, names=name or kwargs.get("names") - ) + return MultiIndex.from_tuples(data, names=names or name) # other iterable of some kind subarr = com.asarray_tuplesafe(data, dtype=object) return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index d1ed79118d2fa..d116b45e20121 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -371,6 +371,25 @@ def test_constructor_simple_new(self, vals, dtype): result = index._simple_new(index.values, dtype) tm.assert_index_equal(result, index) + def test_constructor_names(self): + # test both `name` and `names` + with pytest.raises(TypeError): + idx = Index([1, 2, 3], name="a", names=("a",)) + + # test list-like 
with length > 1 + with pytest.raises(TypeError): + idx = Index([1, 2, 3], names=("a", "b")) + + # test using `name` for a flat `Index` + idx = Index([1, 2, 3], name="a") + assert idx.name == "a" + assert idx.names == ("a",) + + # test using `names` for a flat `Index` + idx = Index([1, 2, 3], names=("a",)) + assert idx.name == "a" + assert idx.names == ("a",) + @pytest.mark.parametrize( "vals", [
- [x] closes #19082 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28032
2019-08-20T11:19:13Z
2019-10-03T19:09:56Z
null
2019-10-03T19:09:56Z
DOC: Fix docstrings lack of punctuation
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 5c121172d0e4f..0778b6726d104 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -514,7 +514,7 @@ def fillna(self, value=None, method=None, limit=None): def dropna(self): """ - Return ExtensionArray without NA values + Return ExtensionArray without NA values. Returns ------- @@ -957,7 +957,7 @@ def _concat_same_type( cls, to_concat: Sequence[ABCExtensionArray] ) -> ABCExtensionArray: """ - Concatenate multiple array + Concatenate multiple array. Parameters ---------- diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 093334a815938..70df708d36b3b 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1158,7 +1158,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None): def to_pydatetime(self): """ Return Datetime Array/Index as object ndarray of datetime.datetime - objects + objects. Returns ------- @@ -1283,7 +1283,7 @@ def to_perioddelta(self, freq): """ Calculate TimedeltaArray of difference between index values and index converted to PeriodArray at specified - freq. Used for vectorized offsets + freq. Used for vectorized offsets. Parameters ---------- diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 20ce11c70c344..f2d74794eadf5 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -426,7 +426,7 @@ def __array__(self, dtype=None): @property def is_leap_year(self): """ - Logical indicating if the date belongs to a leap year + Logical indicating if the date belongs to a leap year. 
""" return isleapyear_arr(np.asarray(self.year)) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 51daad3b42649..272066d476ce3 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -661,7 +661,7 @@ def _get_time_micros(self): def to_series(self, keep_tz=None, index=None, name=None): """ Create a Series with both index and values equal to the index keys - useful with map for returning an indexer based on an index + useful with map for returning an indexer based on an index. Parameters ---------- @@ -687,10 +687,10 @@ def to_series(self, keep_tz=None, index=None, name=None): behaviour and silence the warning. index : Index, optional - index of resulting Series. If None, defaults to original index - name : string, optional - name of resulting Series. If None, defaults to name of original - index + Index of resulting Series. If None, defaults to original index. + name : str, optional + Name of resulting Series. If None, defaults to name of original + index. Returns ------- @@ -735,7 +735,7 @@ def to_series(self, keep_tz=None, index=None, name=None): def snap(self, freq="S"): """ - Snap time stamps to nearest occurring frequency + Snap time stamps to nearest occurring frequency. 
Returns ------- diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index b614952ba1e04..761862b9f30e9 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1250,7 +1250,7 @@ def _set_names(self, names, level=None, validate=True): self.levels[l].rename(name, inplace=True) names = property( - fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex\n""" + fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n""" ) @Appender(_index_shared_docs["_get_grouper_for_level"]) @@ -1762,7 +1762,7 @@ def is_all_dates(self): def is_lexsorted(self): """ - Return True if the codes are lexicographically sorted + Return True if the codes are lexicographically sorted. Returns ------- @@ -2246,7 +2246,7 @@ def swaplevel(self, i=-2, j=-1): def reorder_levels(self, order): """ - Rearrange levels using input order. May not drop or duplicate levels + Rearrange levels using input order. May not drop or duplicate levels. Parameters ---------- diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index d06afa3daa792..8cf14e2ca777e 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -68,20 +68,20 @@ class TimedeltaIndex( ): """ Immutable ndarray of timedelta64 data, represented internally as int64, and - which can be boxed to timedelta objects + which can be boxed to timedelta objects. Parameters ---------- data : array-like (1-dimensional), optional - Optional timedelta-like data to construct index with + Optional timedelta-like data to construct index with. unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional - which is an integer/float number - freq : string or pandas offset object, optional + Which is an integer/float number. + freq : str or pandas offset object, optional One of pandas date offset strings or corresponding objects. 
The string 'infer' can be passed in order to set the frequency of the index as the - inferred frequency upon creation + inferred frequency upon creation. copy : bool - Make a copy of input ndarray + Make a copy of input ndarray. start : starting value, timedelta-like, optional If data is None, start is used as the start point in generating regular timedelta data. @@ -90,24 +90,24 @@ class TimedeltaIndex( periods : int, optional, > 0 Number of periods to generate, if generating index. Takes precedence - over end argument + over end argument. .. deprecated:: 0.24.0 end : end time, timedelta-like, optional If periods is none, generated index will extend to first conforming - time on or just past end argument + time on or just past end argument. .. deprecated:: 0.24. 0 - closed : string or None, default None + closed : str or None, default None Make the interval closed with respect to the given frequency to - the 'left', 'right', or both sides (None) + the 'left', 'right', or both sides (None). .. deprecated:: 0.24. 0 name : object - Name to be stored in the index + Name to be stored in the index. Attributes ---------- diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index ea00737f776ee..9bdf312c917f0 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -49,7 +49,7 @@ def get_indexers_list(): # the public IndexSlicerMaker class _IndexSlice: """ - Create an object to more easily perform multi-index slicing + Create an object to more easily perform multi-index slicing. See Also --------
- [ ] xref #27977 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Similar to issue #27979, and as pointed out by @datapythonista, the summaries of some dosctrings don’t end with a period. I added the period to the following cases: ``` • pandas.IndexSlice • pandas.MultiIndex.names • pandas.MultiIndex.is_lexsorted • pandas.MultiIndex.reorder_levels • pandas.DatetimeIndex.snap • pandas.DatetimeIndex.to_perioddelta • pandas.DatetimeIndex.to_pydatetime • pandas.DatetimeIndex.to_series • pandas.TimedeltaIndex • pandas.PeriodIndex.is_leap_year • pandas.api.extensions.ExtensionArray._concat_same_type • pandas.api.extensions.ExtensionArray.dropna ``` These are the outputs of validate_docstrings.py when evaluating each case: **pandas.IndexSlice** ``` 4 Errors found: Examples do not pass tests flake8 error: E231 missing whitespace after ',' (4 times) flake8 error: E902 TokenError: EOF in multi-line statement flake8 error: E999 SyntaxError: invalid syntax 1 Warnings found: No extended summary found ``` **pandas.MultiIndex.names** ``` 3 Warnings found: No extended summary found See Also section not found No examples section found ``` **pandas.MultiIndex.is_lexsorted** ``` 1 Errors found: Return value has no description 3 Warnings found: No extended summary found See Also section not found No examples section found ``` **pandas.MultiIndex.reorder_levels** ``` 2 Errors found: Parameters {order} not documented Return value has no description 3 Warnings found: No extended summary found See Also section not found No examples section found ``` **pandas.DatetimeIndex.snap** ``` 2 Errors found: Parameters {freq} not documented Return value has no description 3 Warnings found: No extended summary found See Also section not found No examples section found ``` **pandas.DatetimeIndex.to_perioddelta** ``` 5 Errors found: Summary should fit in a single line Parameters {**kwargs, *args} not 
documented Unknown parameters {freq} Parameter "freq" has no description Return value has no description 2 Warnings found: See Also section not found No examples section found ``` **pandas.DatetimeIndex.to_pydatetime** ``` 4 Errors found: Summary should fit in a single line Parameters {**kwargs, *args} not documented The first line of the Returns section should contain only the type, unless multiple values are being returned Return value has no description 2 Warnings found: See Also section not found No examples section found ``` **pandas.DatetimeIndex.to_series** ``` 2 Errors found: Summary should fit in a single line Return value has no description 2 Warnings found: See Also section not found No examples section found ``` **pandas.TimedeltaIndex** ``` 3 Errors found: Summary should fit in a single line Parameters {dtype, verify_integrity, periods, copy, data} not documented Unknown parameters {periods , copy , data } 1 Warnings found: No examples section found ``` **pandas.PeriodIndex.is_leap_year** ``` 3 Warnings found: No extended summary found See Also section not found No examples section found ``` **pandas.api.extensions.ExtensionArray._concat_same_type** ``` 2 Errors found: Parameter "to_concat" has no description Return value has no description 3 Warnings found: No extended summary found See Also section not found No examples section found ``` **pandas.api.extensions.ExtensionArray.dropna** ``` 2 Errors found: The first line of the Returns section should contain only the type, unless multiple values are being returned Return value has no description 3 Warnings found: No extended summary found See Also section not found No examples section found ``` I would be happy to also work on the other errors/warnings in this PR on another.
https://api.github.com/repos/pandas-dev/pandas/pulls/28031
2019-08-20T10:02:38Z
2019-08-23T09:03:01Z
2019-08-23T09:03:01Z
2019-08-23T09:03:11Z
Backport PR #27916 on branch 0.25.x (BUG: fix to_timestamp out_of_bounds)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index cec927d73edca..81ba462d83840 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -32,7 +32,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ - Bug in :func:`to_datetime` where passing a timezone-naive :class:`DatetimeArray` or :class:`DatetimeIndex` and ``utc=True`` would incorrectly return a timezone-naive result (:issue:`27733`) -- +- Bug in :meth:`Period.to_timestamp` where a :class:`Period` outside the :class:`Timestamp` implementation bounds (roughly 1677-09-21 to 2262-04-11) would return an incorrect :class:`Timestamp` instead of raising ``OutOfBoundsDatetime`` (:issue:`19643`) - - diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index c68d686ff2bf2..98e55f50062a2 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -21,7 +21,8 @@ PyDateTime_IMPORT from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct, - pandas_datetime_to_datetimestruct, NPY_DATETIMEUNIT, NPY_FR_D) + pandas_datetime_to_datetimestruct, check_dts_bounds, + NPY_DATETIMEUNIT, NPY_FR_D) cdef extern from "src/datetime/np_datetime.h": int64_t npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, @@ -1011,7 +1012,7 @@ def dt64arr_to_periodarr(int64_t[:] dtarr, int freq, tz=None): @cython.wraparound(False) @cython.boundscheck(False) -def periodarr_to_dt64arr(int64_t[:] periodarr, int freq): +def periodarr_to_dt64arr(const int64_t[:] periodarr, int freq): """ Convert array to datetime64 values from a set of ordinals corresponding to periods per period convention. 
@@ -1024,9 +1025,8 @@ def periodarr_to_dt64arr(int64_t[:] periodarr, int freq): out = np.empty(l, dtype='i8') - with nogil: - for i in range(l): - out[i] = period_ordinal_to_dt64(periodarr[i], freq) + for i in range(l): + out[i] = period_ordinal_to_dt64(periodarr[i], freq) return out.base # .base to access underlying np.ndarray @@ -1179,7 +1179,7 @@ cpdef int64_t period_ordinal(int y, int m, int d, int h, int min, return get_period_ordinal(&dts, freq) -cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) nogil: +cdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) except? -1: cdef: npy_datetimestruct dts @@ -1187,6 +1187,7 @@ cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) nogil: return NPY_NAT get_date_info(ordinal, freq, &dts) + check_dts_bounds(&dts) return dtstruct_to_dt64(&dts) diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index d9646feaf661e..9c9f976a90f51 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -1,6 +1,8 @@ import numpy as np import pytest +from pandas._libs import OutOfBoundsDatetime + import pandas as pd from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray import pandas.util.testing as tm @@ -608,6 +610,15 @@ def test_to_timestamp(self, how, period_index): # an EA-specific tm.assert_ function tm.assert_index_equal(pd.Index(result), pd.Index(expected)) + def test_to_timestamp_out_of_bounds(self): + # GH#19643 previously overflowed silently + pi = pd.period_range("1500", freq="Y", periods=3) + with pytest.raises(OutOfBoundsDatetime): + pi.to_timestamp() + + with pytest.raises(OutOfBoundsDatetime): + pi._data.to_timestamp() + @pytest.mark.parametrize("propname", PeriodArray._bool_ops) def test_bool_properties(self, period_index, propname): # in this case _bool_ops is just `is_leap_year` diff --git a/pandas/tests/scalar/period/test_asfreq.py b/pandas/tests/scalar/period/test_asfreq.py index 
4cff061cabc40..357274e724c68 100644 --- a/pandas/tests/scalar/period/test_asfreq.py +++ b/pandas/tests/scalar/period/test_asfreq.py @@ -30,11 +30,8 @@ def test_asfreq_near_zero_weekly(self): assert week1.asfreq("D", "E") >= per1 assert week2.asfreq("D", "S") <= per2 - @pytest.mark.xfail( - reason="GH#19643 period_helper asfreq functions fail to check for overflows" - ) def test_to_timestamp_out_of_bounds(self): - # GH#19643, currently gives Timestamp('1754-08-30 22:43:41.128654848') + # GH#19643, used to incorrectly give Timestamp in 1754 per = Period("0001-01-01", freq="B") with pytest.raises(OutOfBoundsDatetime): per.to_timestamp()
Backport PR #27916: BUG: fix to_timestamp out_of_bounds
https://api.github.com/repos/pandas-dev/pandas/pulls/28030
2019-08-20T07:00:55Z
2019-08-20T13:32:43Z
2019-08-20T13:32:43Z
2019-08-20T13:32:43Z
DOC: Change document code prun in a row
diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst index b77bfb9778837..a4eefadd54d8c 100644 --- a/doc/source/user_guide/enhancingperf.rst +++ b/doc/source/user_guide/enhancingperf.rst @@ -243,9 +243,9 @@ We've gotten another big improvement. Let's check again where the time is spent: .. ipython:: python - %prun -l 4 apply_integrate_f(df['a'].to_numpy(), - df['b'].to_numpy(), - df['N'].to_numpy()) + %%prun -l 4 apply_integrate_f(df['a'].to_numpy(), + df['b'].to_numpy(), + df['N'].to_numpy()) As one might expect, the majority of the time is now spent in ``apply_integrate_f``, so if we wanted to make anymore efficiencies we must continue to concentrate our
- [x] closes #28026 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28029
2019-08-20T06:51:22Z
2019-08-21T06:43:04Z
2019-08-21T06:43:03Z
2019-08-21T06:43:26Z
Backport PR #27926 on branch 0.25.x (Fix regression in .ix fallback with IntervalIndex)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index cec927d73edca..a21955ea9aaac 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -85,6 +85,7 @@ Indexing - Bug in partial-string indexing returning a NumPy array rather than a ``Series`` when indexing with a scalar like ``.loc['2015']`` (:issue:`27516`) - Break reference cycle involving :class:`Index` and other index classes to allow garbage collection of index objects without running the GC. (:issue:`27585`, :issue:`27840`) - Fix regression in assigning values to a single column of a DataFrame with a ``MultiIndex`` columns (:issue:`27841`). +- Fix regression in ``.ix`` fallback with an ``IntervalIndex`` (:issue:`27865`). - Missing diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 71985c0707095..b37d903699327 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -123,7 +123,7 @@ def __getitem__(self, key): key = tuple(com.apply_if_callable(x, self.obj) for x in key) try: values = self.obj._get_value(*key) - except (KeyError, TypeError, InvalidIndexError): + except (KeyError, TypeError, InvalidIndexError, AttributeError): # TypeError occurs here if the key has non-hashable entries, # generally slice or list. 
# TODO(ix): most/all of the TypeError cases here are for ix, @@ -131,6 +131,9 @@ def __getitem__(self, key): # The InvalidIndexError is only catched for compatibility # with geopandas, see # https://github.com/pandas-dev/pandas/issues/27258 + # TODO: The AttributeError is for IntervalIndex which + # incorrectly implements get_value, see + # https://github.com/pandas-dev/pandas/issues/27865 pass else: if is_scalar(values): diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py index 45ccd8d1b8fb3..6029db8ed66f6 100644 --- a/pandas/tests/indexing/test_ix.py +++ b/pandas/tests/indexing/test_ix.py @@ -343,3 +343,13 @@ def test_ix_duplicate_returns_series(self): r = df.ix[0.2, "a"] e = df.loc[0.2, "a"] tm.assert_series_equal(r, e) + + def test_ix_intervalindex(self): + # https://github.com/pandas-dev/pandas/issues/27865 + df = DataFrame( + np.random.randn(5, 2), + index=pd.IntervalIndex.from_breaks([-np.inf, 0, 1, 2, 3, np.inf]), + ) + result = df.ix[0:2, 0] + expected = df.iloc[0:2, 0] + tm.assert_series_equal(result, expected)
Backport PR #27926: Fix regression in .ix fallback with IntervalIndex
https://api.github.com/repos/pandas-dev/pandas/pulls/28028
2019-08-20T06:48:03Z
2019-08-20T13:32:27Z
2019-08-20T13:32:27Z
2019-08-20T13:32:27Z
REF: standardize usage in DataFrame vs SparseDataFrame ops
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 16fece1c7eb8b..6aa3690ef54ea 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5306,7 +5306,6 @@ def reorder_levels(self, order, axis=0): def _combine_frame(self, other, func, fill_value=None, level=None): this, other = self.align(other, join="outer", level=level, copy=False) - new_index, new_columns = this.index, this.columns if fill_value is None: # since _arith_op may be called in a loop, avoid function call @@ -5324,14 +5323,12 @@ def _arith_op(left, right): if ops.should_series_dispatch(this, other, func): # iterate over columns - return ops.dispatch_to_series(this, other, _arith_op) + new_data = ops.dispatch_to_series(this, other, _arith_op) else: with np.errstate(all="ignore"): - result = _arith_op(this.values, other.values) - result = dispatch_fill_zeros(func, this.values, other.values, result) - return self._constructor( - result, index=new_index, columns=new_columns, copy=False - ) + res_values = _arith_op(this.values, other.values) + new_data = dispatch_fill_zeros(func, this.values, other.values, res_values) + return this._construct_result(other, new_data, _arith_op) def _combine_match_index(self, other, func, level=None): left, right = self.align(other, join="outer", axis=0, level=level, copy=False) @@ -5339,23 +5336,49 @@ def _combine_match_index(self, other, func, level=None): if left._is_mixed_type or right._is_mixed_type: # operate column-wise; avoid costly object-casting in `.values` - return ops.dispatch_to_series(left, right, func) + new_data = ops.dispatch_to_series(left, right, func) else: # fastpath --> operate directly on values with np.errstate(all="ignore"): new_data = func(left.values.T, right.values).T - return self._constructor( - new_data, index=left.index, columns=self.columns, copy=False - ) + return left._construct_result(other, new_data, func) def _combine_match_columns(self, other: Series, func, level=None): left, right = self.align(other, join="outer", 
axis=1, level=level, copy=False) # at this point we have `left.columns.equals(right.index)` - return ops.dispatch_to_series(left, right, func, axis="columns") + new_data = ops.dispatch_to_series(left, right, func, axis="columns") + return left._construct_result(right, new_data, func) def _combine_const(self, other, func): # scalar other or np.ndim(other) == 0 - return ops.dispatch_to_series(self, other, func) + new_data = ops.dispatch_to_series(self, other, func) + return self._construct_result(other, new_data, func) + + def _construct_result(self, other, result, func): + """ + Wrap the result of an arithmetic, comparison, or logical operation. + + Parameters + ---------- + other : object + result : DataFrame + func : binary operator + + Returns + ------- + DataFrame + + Notes + ----- + `func` is included for compat with SparseDataFrame signature, is not + needed here. + """ + out = self._constructor(result, index=self.index, copy=False) + # Pin columns instead of passing to constructor for compat with + # non-unique columns case + out.columns = self.columns + return out + # TODO: finalize? 
we do for SparseDataFrame def combine(self, other, func, fill_value=None, overwrite=True): """ diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 016feff7e3beb..38faf3cea88fd 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -509,12 +509,7 @@ def column_op(a, b): raise NotImplementedError(right) new_data = expressions.evaluate(column_op, str_rep, left, right) - - result = left._constructor(new_data, index=left.index, copy=False) - # Pin columns instead of passing to constructor for compat with - # non-unique columns case - result.columns = left.columns - return result + return new_data def dispatch_to_extension_op( @@ -1055,7 +1050,8 @@ def f(self, other, axis=default_axis, level=None): # Another DataFrame if not self._indexed_same(other): self, other = self.align(other, "outer", level=level, copy=False) - return dispatch_to_series(self, other, na_op, str_rep) + new_data = dispatch_to_series(self, other, na_op, str_rep) + return self._construct_result(other, new_data, na_op) elif isinstance(other, ABCSeries): return _combine_series_frame( @@ -1085,7 +1081,8 @@ def f(self, other): raise ValueError( "Can only compare identically-labeled DataFrame objects" ) - return dispatch_to_series(self, other, func, str_rep) + new_data = dispatch_to_series(self, other, func, str_rep) + return self._construct_result(other, new_data, func) elif isinstance(other, ABCSeries): return _combine_series_frame( diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 3d6ba0b8d9774..aaa99839144b4 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -534,19 +534,13 @@ def _set_value(self, index, col, value, takeable=False): # Arithmetic-related methods def _combine_frame(self, other, func, fill_value=None, level=None): - if level is not None: - raise NotImplementedError("'level' argument is not supported") - this, other = self.align(other, join="outer", level=level, copy=False) - new_index, 
new_columns = this.index, this.columns - - if self.empty and other.empty: - return self._constructor(index=new_index).__finalize__(self) + this._default_fill_value = self._default_fill_value new_data = {} if fill_value is not None: # TODO: be a bit more intelligent here - for col in new_columns: + for col in this.columns: if col in this and col in other: dleft = this[col].to_dense() dright = other[col].to_dense() @@ -555,38 +549,21 @@ def _combine_frame(self, other, func, fill_value=None, level=None): new_data[col] = result else: - for col in new_columns: + for col in this.columns: if col in this and col in other: new_data[col] = func(this[col], other[col]) - new_fill_value = self._get_op_result_fill_value(other, func) - - return self._constructor( - data=new_data, - index=new_index, - columns=new_columns, - default_fill_value=new_fill_value, - ).__finalize__(self) + return this._construct_result(other, new_data, func) def _combine_match_index(self, other, func, level=None): - - if level is not None: - raise NotImplementedError("'level' argument is not supported") - this, other = self.align(other, join="outer", axis=0, level=level, copy=False) + this._default_fill_value = self._default_fill_value new_data = {} for col in this.columns: new_data[col] = func(this[col], other) - fill_value = self._get_op_result_fill_value(other, func) - - return self._constructor( - new_data, - index=this.index, - columns=self.columns, - default_fill_value=fill_value, - ).__finalize__(self) + return this._construct_result(other, new_data, func) def _combine_match_columns(self, other, func, level=None): # patched version of DataFrame._combine_match_columns to account for @@ -594,27 +571,40 @@ def _combine_match_columns(self, other, func, level=None): # where 3.0 is numpy.float64 and series is a SparseSeries. 
Still # possible for this to happen, which is bothersome - if level is not None: - raise NotImplementedError("'level' argument is not supported") - left, right = self.align(other, join="outer", axis=1, level=level, copy=False) assert left.columns.equals(right.index) + left._default_fill_value = self._default_fill_value new_data = {} - for col in left.columns: new_data[col] = func(left[col], right[col]) - return self._constructor( - new_data, - index=left.index, - columns=left.columns, - default_fill_value=self.default_fill_value, - ).__finalize__(self) + # TODO: using this changed some behavior, see GH#28025 + return left._construct_result(other, new_data, func) def _combine_const(self, other, func): return self._apply_columns(lambda x: func(x, other)) + def _construct_result(self, other, result, func): + """ + Wrap the result of an arithmetic, comparison, or logical operation. + + Parameters + ---------- + other : object + result : SparseDataFrame + func : binary operator + + Returns + ------- + SparseDataFrame + """ + fill_value = self._get_op_result_fill_value(other, func) + + out = self._constructor(result, index=self.index, default_fill_value=fill_value) + out.columns = self.columns + return out.__finalize__(self) + def _get_op_result_fill_value(self, other, func): own_default = self.default_fill_value @@ -643,6 +633,11 @@ def _get_op_result_fill_value(self, other, func): else: fill_value = func(np.float64(own_default), np.float64(other.fill_value)) fill_value = item_from_zerodim(fill_value) + + elif isinstance(other, Series): + # reached via _combine_match_columns + fill_value = self.default_fill_value + else: raise NotImplementedError(type(other))
I _think_ that after this we're not far from being able to use the base class versions of _combine_frame, _combine_match_index, _combine_match_columns, and _combine_const. That'll be a good day. The only actual behavior changed is in SparseDataFrame._combine_match_columns, which is changed to match the other methods. See #28025
https://api.github.com/repos/pandas-dev/pandas/pulls/28027
2019-08-20T03:02:38Z
2019-09-17T12:40:44Z
2019-09-17T12:40:43Z
2019-09-17T14:03:04Z
BUG: rfloordiv with fill_value, closes#27464
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 463dcef9feab6..108ddb1cdeab5 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -54,7 +54,7 @@ Numeric - Bug in :meth:`Series.interpolate` when using a timezone aware :class:`DatetimeIndex` (:issue:`27548`) - Bug when printing negative floating point complex numbers would raise an ``IndexError`` (:issue:`27484`) - Bug where :class:`DataFrame` arithmetic operators such as :meth:`DataFrame.mul` with a :class:`Series` with axis=1 would raise an ``AttributeError`` on :class:`DataFrame` larger than the minimum threshold to invoke numexpr (:issue:`27636`) -- +- Bug in :class:`DataFrame` arithmetic where missing values in results were incorrectly masked with ``NaN`` instead of ``Inf`` (:issue:`27464`) Conversion ^^^^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 603a615c1f8cb..1be7e0736f9fe 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -108,6 +108,7 @@ sanitize_index, to_arrays, ) +from pandas.core.ops.missing import dispatch_fill_zeros from pandas.core.series import Series from pandas.io.formats import console, format as fmt @@ -5305,7 +5306,9 @@ def _arith_op(left, right): # iterate over columns return ops.dispatch_to_series(this, other, _arith_op) else: - result = _arith_op(this.values, other.values) + with np.errstate(all="ignore"): + result = _arith_op(this.values, other.values) + result = dispatch_fill_zeros(func, this.values, other.values, result) return self._constructor( result, index=new_index, columns=new_columns, copy=False ) diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 2b23790e4ccd3..d686d9f90a5a4 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -1227,3 +1227,36 @@ def test_addsub_arithmetic(self, dtype, delta): tm.assert_index_equal(index + index, 2 * index) tm.assert_index_equal(index - 
index, 0 * index) assert not (index - index).empty + + +def test_fill_value_inf_masking(): + # GH #27464 make sure we mask 0/1 with Inf and not NaN + df = pd.DataFrame({"A": [0, 1, 2], "B": [1.1, None, 1.1]}) + + other = pd.DataFrame({"A": [1.1, 1.2, 1.3]}, index=[0, 2, 3]) + + result = df.rfloordiv(other, fill_value=1) + + expected = pd.DataFrame( + {"A": [np.inf, 1.0, 0.0, 1.0], "B": [0.0, np.nan, 0.0, np.nan]} + ) + tm.assert_frame_equal(result, expected) + + +def test_dataframe_div_silenced(): + # GH#26793 + pdf1 = pd.DataFrame( + { + "A": np.arange(10), + "B": [np.nan, 1, 2, 3, 4] * 2, + "C": [np.nan] * 10, + "D": np.arange(10), + }, + index=list("abcdefghij"), + columns=list("ABCD"), + ) + pdf2 = pd.DataFrame( + np.random.randn(10, 4), index=list("abcdefghjk"), columns=list("ABCX") + ) + with tm.assert_produces_warning(None): + pdf1.div(pdf2, fill_value=0)
- [x] closes #27464, closes #26793 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28024
2019-08-20T01:04:19Z
2019-08-20T17:45:06Z
2019-08-20T17:45:06Z
2019-08-20T18:42:02Z
CLN: timeseries in plotting clean up
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index f3fcb090e9883..f160e50d8d99b 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -304,23 +304,6 @@ def _maybe_convert_index(ax, data): # Do we need the rest for convenience? -def format_timedelta_ticks(x, pos, n_decimals): - """ - Convert seconds to 'D days HH:MM:SS.F' - """ - s, ns = divmod(x, 1e9) - m, s = divmod(s, 60) - h, m = divmod(m, 60) - d, h = divmod(h, 24) - decimals = int(ns * 10 ** (n_decimals - 9)) - s = r"{:02d}:{:02d}:{:02d}".format(int(h), int(m), int(s)) - if n_decimals > 0: - s += ".{{:0{:0d}d}}".format(n_decimals).format(decimals) - if d != 0: - s = "{:d} days ".format(int(d)) + s - return s - - def _format_coord(freq, t, y): return "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y) diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index 7001264c41c05..aabe16d5050f9 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -388,3 +388,21 @@ def test_convert_nested(self): r1 = self.pc.convert([data, data], None, self.axis) r2 = [self.pc.convert(data, None, self.axis) for _ in range(2)] assert r1 == r2 + + +class TestTimeDeltaConverter: + """Test timedelta converter""" + + @pytest.mark.parametrize( + "x, decimal, format_expected", + [ + (0.0, 0, "00:00:00"), + (3972320000000, 1, "01:06:12.3"), + (713233432000000, 2, "8 days 06:07:13.43"), + (32423432000000, 4, "09:00:23.4320"), + ], + ) + def test_format_timedelta_ticks(self, x, decimal, format_expected): + tdc = converter.TimeSeries_TimedeltaFormatter + result = tdc.format_timedelta_ticks(x, pos=None, n_decimals=decimal) + assert result == format_expected
`format_timedelta_ticks` appears both in `timeseries.py` and `matplotlib.converter.py`, and there is no test. Then i look through the code base, it is only used in `matplotlib.converter.py` for `TimeSeries_TimedeltaFormatter`. So I remove the one in `timeseries.py` and add test for it. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28020
2019-08-19T20:35:31Z
2019-09-11T01:43:55Z
2019-09-11T01:43:55Z
2019-09-11T01:44:01Z
Backport PR #28016 on branch 0.25.x (TST: fix flaky xfail)
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 3a5a387b919be..1ddaa4692d741 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1482,16 +1482,7 @@ def test_value_counts_with_nan(self): @pytest.mark.parametrize( "dtype", - [ - "int_", - "uint", - "float_", - "unicode_", - "timedelta64[h]", - pytest.param( - "datetime64[D]", marks=pytest.mark.xfail(reason="GH#7996", strict=True) - ), - ], + ["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"], ) def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture): cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype)) @@ -1499,6 +1490,10 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture): # Test case 1 input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype)) tc1 = Series(Categorical(input1, categories=cat_array, ordered=ordered_fixture)) + if dtype == "datetime64[D]": + # pre-empty flaky xfail, tc1 values are seemingly-random + if not (np.array(tc1) == input1).all(): + pytest.xfail(reason="GH#7996") expected = Series([False, False, False, True]) tm.assert_series_equal(tc1.duplicated(), expected) @@ -1524,6 +1519,10 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture): # Test case 2 input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype)) tc2 = Series(Categorical(input2, categories=cat_array, ordered=ordered_fixture)) + if dtype == "datetime64[D]": + # pre-empty flaky xfail, tc2 values are seemingly-random + if not (np.array(tc2) == input2).all(): + pytest.xfail(reason="GH#7996") expected = Series([False, False, False, False, True, True, False]) tm.assert_series_equal(tc2.duplicated(), expected)
Backport PR #28016: TST: fix flaky xfail
https://api.github.com/repos/pandas-dev/pandas/pulls/28019
2019-08-19T18:29:18Z
2019-08-20T06:45:49Z
2019-08-20T06:45:49Z
2019-08-20T06:45:49Z
DOC/TST: Update the parquet (pyarrow >= 0.15) docs and tests regarding Categorical support
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 2c8f66dd99e72..ee097c1f4d5e8 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -4710,7 +4710,8 @@ Several caveats. indexes. This extra column can cause problems for non-Pandas consumers that are not expecting it. You can force including or omitting indexes with the ``index`` argument, regardless of the underlying engine. * Index level names, if specified, must be strings. -* Categorical dtypes can be serialized to parquet, but will de-serialize as ``object`` dtype. +* In the ``pyarrow`` engine, categorical dtypes for non-string types can be serialized to parquet, but will de-serialize as their primitive dtype. +* The ``pyarrow`` engine preserves the ``ordered`` flag of categorical dtypes with string types. ``fastparquet`` does not preserve the ``ordered`` flag. * Non supported types include ``Period`` and actual Python object types. These will raise a helpful error message on an attempt at serialization. 
@@ -4734,7 +4735,9 @@ See the documentation for `pyarrow <https://arrow.apache.org/docs/python/>`__ an 'd': np.arange(4.0, 7.0, dtype='float64'), 'e': [True, False, True], 'f': pd.date_range('20130101', periods=3), - 'g': pd.date_range('20130101', periods=3, tz='US/Eastern')}) + 'g': pd.date_range('20130101', periods=3, tz='US/Eastern'), + 'h': pd.Categorical(list('abc')), + 'i': pd.Categorical(list('abc'), ordered=True)}) df df.dtypes diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index f8c4f9f3dc410..2b147f948adb1 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -176,6 +176,7 @@ Categorical - Added test to assert the :func:`fillna` raises the correct ValueError message when the value isn't a value from categories (:issue:`13628`) - Bug in :meth:`Categorical.astype` where ``NaN`` values were handled incorrectly when casting to int (:issue:`28406`) - :meth:`Categorical.searchsorted` and :meth:`CategoricalIndex.searchsorted` now work on unordered categoricals also (:issue:`21667`) +- Added test to assert roundtripping to parquet with :func:`DataFrame.to_parquet` or :func:`read_parquet` will preserve Categorical dtypes for string types (:issue:`27955`) - diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index efc2b6d6c5b3d..2a95904d5668d 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -167,6 +167,7 @@ def compare(repeat): df.to_parquet(path, **write_kwargs) with catch_warnings(record=True): actual = read_parquet(path, **read_kwargs) + tm.assert_frame_equal(expected, actual, check_names=check_names) if path is None: @@ -461,11 +462,26 @@ def test_unsupported(self, pa): def test_categorical(self, pa): # supported in >= 0.7.0 - df = pd.DataFrame({"a": pd.Categorical(list("abc"))}) + df = pd.DataFrame() + df["a"] = pd.Categorical(list("abcdef")) - # de-serialized as object - expected = df.assign(a=df.a.astype(object)) - check_round_trip(df, 
pa, expected=expected) + # test for null, out-of-order values, and unobserved category + df["b"] = pd.Categorical( + ["bar", "foo", "foo", "bar", None, "bar"], + dtype=pd.CategoricalDtype(["foo", "bar", "baz"]), + ) + + # test for ordered flag + df["c"] = pd.Categorical( + ["a", "b", "c", "a", "c", "b"], categories=["b", "c", "d"], ordered=True + ) + + if LooseVersion(pyarrow.__version__) >= LooseVersion("0.15.0"): + check_round_trip(df, pa) + else: + # de-serialized as object for pyarrow < 0.15 + expected = df.astype(object) + check_round_trip(df, pa, expected=expected) def test_s3_roundtrip(self, df_compat, s3_resource, pa): # GH #19134
- [x] closes #27955 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28018
2019-08-19T18:22:38Z
2019-10-04T19:48:04Z
2019-10-04T19:48:04Z
2019-10-05T00:23:59Z
Backport PR #27773 on branch 0.25.x (BUG: _can_use_numexpr fails when passed large Series)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 87e46f97d3157..cec927d73edca 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -54,7 +54,7 @@ Numeric ^^^^^^^ - Bug in :meth:`Series.interpolate` when using a timezone aware :class:`DatetimeIndex` (:issue:`27548`) - Bug when printing negative floating point complex numbers would raise an ``IndexError`` (:issue:`27484`) -- +- Bug where :class:`DataFrame` arithmetic operators such as :meth:`DataFrame.mul` with a :class:`Series` with axis=1 would raise an ``AttributeError`` on :class:`DataFrame` larger than the minimum threshold to invoke numexpr (:issue:`27636`) - Conversion diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index ea61467080291..9621fb1d65509 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -76,16 +76,17 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): # required min elements (otherwise we are adding overhead) if np.prod(a.shape) > _MIN_ELEMENTS: - # check for dtype compatibility dtypes = set() for o in [a, b]: - if hasattr(o, "dtypes"): + # Series implements dtypes, check for dimension count as well + if hasattr(o, "dtypes") and o.ndim > 1: s = o.dtypes.value_counts() if len(s) > 1: return False dtypes |= set(s.index.astype(str)) - elif isinstance(o, np.ndarray): + # ndarray and Series Case + elif hasattr(o, "dtype"): dtypes |= {o.dtype.name} # allowed are a superset diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 4070624985068..ca514f62f451d 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -66,7 +66,7 @@ def run_arithmetic(self, df, other, assert_func, check_dtype=False, test_flex=Tr operator_name = "truediv" if test_flex: - op = lambda x, y: getattr(df, arith)(y) + op = lambda x, y: getattr(x, arith)(y) op.__name__ = arith else: op = getattr(operator, 
operator_name) @@ -318,7 +318,6 @@ def testit(): for f in [self.frame, self.frame2, self.mixed, self.mixed2]: for cond in [True, False]: - c = np.empty(f.shape, dtype=np.bool_) c.fill(cond) result = expr.where(c, f.values, f.values + 1) @@ -431,3 +430,29 @@ def test_bool_ops_column_name_dtype(self, test_input, expected): # GH 22383 - .ne fails if columns containing column name 'dtype' result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]]) assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv") + ) + @pytest.mark.parametrize("axis", (0, 1)) + def test_frame_series_axis(self, axis, arith): + # GH#26736 Dataframe.floordiv(Series, axis=1) fails + if axis == 1 and arith == "floordiv": + pytest.xfail("'floordiv' does not succeed with axis=1 #27636") + + df = self.frame + if axis == 1: + other = self.frame.iloc[0, :] + else: + other = self.frame.iloc[:, 0] + + expr._MIN_ELEMENTS = 0 + + op_func = getattr(df, arith) + + expr.set_use_numexpr(False) + expected = op_func(other, axis=axis) + expr.set_use_numexpr(True) + + result = op_func(other, axis=axis) + assert_frame_equal(expected, result)
Backport PR #27773: BUG: _can_use_numexpr fails when passed large Series
https://api.github.com/repos/pandas-dev/pandas/pulls/28017
2019-08-19T17:20:59Z
2019-08-20T06:45:31Z
2019-08-20T06:45:31Z
2019-08-20T06:45:31Z
TST: fix flaky xfail
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 3a5a387b919be..1ddaa4692d741 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1482,16 +1482,7 @@ def test_value_counts_with_nan(self): @pytest.mark.parametrize( "dtype", - [ - "int_", - "uint", - "float_", - "unicode_", - "timedelta64[h]", - pytest.param( - "datetime64[D]", marks=pytest.mark.xfail(reason="GH#7996", strict=True) - ), - ], + ["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"], ) def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture): cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype)) @@ -1499,6 +1490,10 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture): # Test case 1 input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype)) tc1 = Series(Categorical(input1, categories=cat_array, ordered=ordered_fixture)) + if dtype == "datetime64[D]": + # pre-empty flaky xfail, tc1 values are seemingly-random + if not (np.array(tc1) == input1).all(): + pytest.xfail(reason="GH#7996") expected = Series([False, False, False, True]) tm.assert_series_equal(tc1.duplicated(), expected) @@ -1524,6 +1519,10 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture): # Test case 2 input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype)) tc2 = Series(Categorical(input2, categories=cat_array, ordered=ordered_fixture)) + if dtype == "datetime64[D]": + # pre-empty flaky xfail, tc2 values are seemingly-random + if not (np.array(tc2) == input2).all(): + pytest.xfail(reason="GH#7996") expected = Series([False, False, False, False, True, True, False]) tm.assert_series_equal(tc2.duplicated(), expected)
xfail in more specific circumstance - [x] closes #28011 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28016
2019-08-19T17:13:13Z
2019-08-19T18:29:07Z
2019-08-19T18:29:07Z
2019-08-19T18:41:50Z
WEB: Adding new pandas website
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index f839d86318e2e..b03c4f2238445 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -188,9 +188,9 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then set -o pipefail if [[ "$AZURE" == "true" ]]; then # we exclude all c/cpp files as the c/cpp files of pandas code base are tested when Linting .c and .h files - ! grep -n '--exclude=*.'{svg,c,cpp,html} --exclude-dir=env -RI "\s$" * | awk -F ":" '{print "##vso[task.logissue type=error;sourcepath=" $1 ";linenumber=" $2 ";] Tailing whitespaces found: " $3}' + ! grep -n '--exclude=*.'{svg,c,cpp,html,js} --exclude-dir=env -RI "\s$" * | awk -F ":" '{print "##vso[task.logissue type=error;sourcepath=" $1 ";linenumber=" $2 ";] Tailing whitespaces found: " $3}' else - ! grep -n '--exclude=*.'{svg,c,cpp,html} --exclude-dir=env -RI "\s$" * | awk -F ":" '{print $1 ":" $2 ":Tailing whitespaces found: " $3}' + ! grep -n '--exclude=*.'{svg,c,cpp,html,js} --exclude-dir=env -RI "\s$" * | awk -F ":" '{print $1 ":" $2 ":Tailing whitespaces found: " $3}' fi RET=$(($RET + $?)) ; echo $MSG "DONE" fi diff --git a/environment.yml b/environment.yml index 89089dcea8c99..6187321cd9242 100644 --- a/environment.yml +++ b/environment.yml @@ -36,6 +36,12 @@ dependencies: - nbsphinx - pandoc + # web (jinja2 is also needed, but it's also an optional pandas dependency) + - markdown + - feedparser + - pyyaml + - requests + # testing - boto3 - botocore>=1.11 diff --git a/requirements-dev.txt b/requirements-dev.txt index 3f4636043dbac..fd8e6378240b4 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -17,6 +17,10 @@ numpydoc>=0.9.0 nbconvert>=5.4.1 nbsphinx pandoc +markdown +feedparser +pyyaml +requests boto3 botocore>=1.11 hypothesis>=3.82 diff --git a/web/README.md b/web/README.md new file mode 100644 index 0000000000000..7396fbd0833a1 --- /dev/null +++ b/web/README.md @@ -0,0 +1,12 @@ +Directory containing the pandas website (hosted at https://pandas.io). 
+ +The website sources are in `web/pandas/`, which also include a `config.yml` file +containing the settings to build the website. The website is generated with the +command `./pandas_web.py pandas`. See `./pandas_web.py --help` and the header of +the script for more information and options. + +After building the website, to navigate it, it is needed to access the web using +an http server (a not open the local files with the browser, since the links and +the image sources are absolute to where they are served from). The easiest way +to run an http server locally is to run `python -m http.server` from the +`web/build/` directory. diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html new file mode 100644 index 0000000000000..253318182f30c --- /dev/null +++ b/web/pandas/_templates/layout.html @@ -0,0 +1,85 @@ +<!DOCTYPE html> +<html> + <head> + <script type="text/javascript"> + var _gaq = _gaq || []; _gaq.push(['_setAccount', 'UA-27880019-2']); _gaq.push(['_trackPageview']); + (function() { + var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true; + ga.src = ('https:' == document.location.protocol ? 
'https://ssl' : 'http://www') + '.google-analytics.com/ga.js'; + var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s); + })(); + </script> + <title>pandas - Python Data Analysis Library</title> + <meta charset="utf-8"> + <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> + <link rel="stylesheet" + href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" + integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" + crossorigin="anonymous"> + {% for stylesheet in static.css %} + <link rel="stylesheet" + href="{{ base_url }}{{ stylesheet }}"> + {% endfor %} + </head> + <body> + <header> + <nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark"> + <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#nav-content" aria-controls="nav-content" aria-expanded="false" aria-label="Toggle navigation"> + <span class="navbar-toggler-icon"></span> + </button> + + {% if static.logo %}<a class="navbar-brand" href="{{ base_url }}/"><img alt="" src="{{ base_url }}{{ static.logo }}"/></a>{% endif %} + + <div class="collapse navbar-collapse" id="nav-content"> + <ul class="navbar-nav"> + {% for item in navbar %} + {% if not item.has_subitems %} + <li class="nav-item"> + <a class="nav-link" href="{% if not item.target.startswith("http") %}{{ base_url }}{% endif %}{{ item.target }}">{{ item.name }}</a> + </li> + {% else %} + <li class="nav-item dropdown"> + <a class="nav-link dropdown-toggle" + data-toggle="dropdown" + id="{{ item.slug }}" + href="#" + role="button" + aria-haspopup="true" + aria-expanded="false">{{ item.name }}</a> + <div class="dropdown-menu" aria-labelledby="{{ item.slug }}"> + {% for subitem in item.target %} + <a class="dropdown-item" href="{% if not subitem.target.startswith("http") %}{{ base_url }}{% endif %}{{ subitem.target }}">{{ subitem.name }}</a> + {% endfor %} + </div> + </li> + {% endif %} + {% endfor %} + 
</ul> + </div> + </nav> + </header> + <main role="main"> + <div class="container"> + {% block body %}{% endblock %} + </div> + </main> + <footer class="container pt-4 pt-md-5 border-top"> + <p class="float-right"> + <a href="#">Back to top</a> + </p> + <p> + © 2009 - 2019, pandas team + </p> + </footer> + + <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" + integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" + crossorigin="anonymous"></script> + <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" + integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" + crossorigin="anonymous"></script> + <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" + integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" + crossorigin="anonymous"></script> + </body> +</html> diff --git a/web/pandas/blog.html b/web/pandas/blog.html new file mode 100644 index 0000000000000..ffe6f97d679e4 --- /dev/null +++ b/web/pandas/blog.html @@ -0,0 +1,14 @@ +{% extends "layout.html" %} + +{% block body %} + {% for post in blog.posts %} + <div class="card"> + <div class="card-body"> + <h3 class="card-title"><a href="{{post.link }}" target="_blank">{{ post.title }}</a></h3> + <h6 class="card-subtitle">Source: {{ post.feed }} | Author: {{ post.author }} | Published: {{ post.published.strftime("%b %d, %Y") }}</h6> + <div class="card-text">{{ post.summary }}</div> + <a class="card-link" href="{{post.link }}" target="_blank">Read</a> + </div> + </div> + {% endfor %} +{% endblock %} diff --git a/web/pandas/community/about.md b/web/pandas/community/about.md new file mode 100644 index 0000000000000..4e50d280d2a10 --- /dev/null +++ b/web/pandas/community/about.md @@ -0,0 +1,86 @@ +# About pandas + +## History of development + +In 2008, _pandas_ development began at [AQR Capital Management](http://www.aqr.com). 
+By the end of 2009 it had been [open sourced](http://en.wikipedia.org/wiki/Open_source), +and is actively supported today by a community of like-minded individuals around the world who +contribute their valuable time and energy to help make open source _pandas_ +possible. Thank you to [all of our contributors](team.html). + +Since 2015, _pandas_ is a [NumFOCUS sponsored project](https://numfocus.org/sponsored-projects). +This will help ensure the success of development of _pandas_ as a world-class open-source project. + +### Timeline + +- **2008**: Development of _pandas_ started +- **2009**: _pandas_ becomes open source +- **2012**: First edition of _Python for Data Analysis_ is published +- **2015**: _pandas_ becomes a [NumFOCUS sponsored project](https://numfocus.org/sponsored-projects) +- **2018**: First in-person core developer sprint + +## Library Highlights + +- A fast and efficient **DataFrame** object for data manipulation with + integrated indexing; + +- Tools for **reading and writing data** between in-memory data structures and + different formats: CSV and text files, Microsoft Excel, SQL databases, and + the fast HDF5 format; + +- Intelligent **data alignment** and integrated handling of **missing data**: + gain automatic label-based alignment in computations and easily manipulate + messy data into an orderly form; + +- Flexible **reshaping** and pivoting of data sets; + +- Intelligent label-based **slicing**, **fancy indexing**, and **subsetting** + of large data sets; + +- Columns can be inserted and deleted from data structures for **size + mutability**; + +- Aggregating or transforming data with a powerful **group by** engine + allowing split-apply-combine operations on data sets; + +- High performance **merging and joining** of data sets; + +- **Hierarchical axis indexing** provides an intuitive way of working with + high-dimensional data in a lower-dimensional data structure; + +- **Time series**-functionality: date range generation and 
frequency + conversion, moving window statistics, moving window linear regressions, date + shifting and lagging. Even create domain-specific time offsets and join time + series without losing data; + +- Highly **optimized for performance**, with critical code paths written in + [Cython](http://www.cython.org/) or C. + +- Python with *pandas* is in use in a wide variety of **academic and + commercial** domains, including Finance, Neuroscience, Economics, + Statistics, Advertising, Web Analytics, and more. + +## Mission + +_pandas_ aims to be the fundamental high-level building block for doing practical, +real world data analysis in Python. +Additionally, it has the broader goal of becoming the most powerful and flexible +open source data analysis / manipulation tool available in any language. + +## Vision + +A world where data analytics and manipulation software is: + +- Accessible to everyone +- Free for users to use and modify +- Flexible +- Powerful +- Easy to use +- Fast + +## Values + +Is in the core of _pandas_ to be respectful and welcoming with everybody, +users, contributors and the broader community. Regardless of level of experience, +gender, gender identity and expression, sexual orientation, disability, +personal appearance, body size, race, ethnicity, age, religion, or nationality. diff --git a/web/pandas/community/citing.md b/web/pandas/community/citing.md new file mode 100644 index 0000000000000..6bad948bb3736 --- /dev/null +++ b/web/pandas/community/citing.md @@ -0,0 +1,46 @@ +# Citing pandas + +## Citing + +If you use _pandas_ for a scientific publication, we would appreciate citations to one of the following papers: + +- [Data structures for statistical computing in python](http://conference.scipy.org/proceedings/scipy2010/pdfs/mckinney.pdf), + McKinney, Proceedings of the 9th Python in Science Conference, Volume 445, 2010. 
+ + @inproceedings{mckinney2010data, + title={Data structures for statistical computing in python}, + author={Wes McKinney}, + booktitle={Proceedings of the 9th Python in Science Conference}, + volume={445}, + pages={51--56}, + year={2010}, + organization={Austin, TX} + } + + +- [pandas: a foundational Python library for data analysis and statistics](https://www.scribd.com/document/71048089/pandas-a-Foundational-Python-Library-for-Data-Analysis-and-Statistics), + McKinney, Python for High Performance and Scientific Computing, Volume 14, 2011. + + @article{mckinney2011pandas, + title={pandas: a foundational Python library for data analysis and statistics}, + author={Wes McKinney}, + journal={Python for High Performance and Scientific Computing}, + volume={14}, + year={2011} + } + +## Brand and logo + +When using the project name _pandas_, please use it in lower case, even at the beginning of a sentence. + +The official logo of _pandas_ is: + +![]({{ base_url }}/static/img/pandas.svg) + +You can download a `svg` version of the logo [here]({{ base_url }}/static/img/pandas.svg). + +When using the logo, please follow the next directives: + +- Leave enough margin around the logo +- Do not distort the logo by changing its proportions +- Do not place text or other elements on top of the logo diff --git a/web/pandas/community/coc.md b/web/pandas/community/coc.md new file mode 100644 index 0000000000000..2841349fdb556 --- /dev/null +++ b/web/pandas/community/coc.md @@ -0,0 +1,63 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of +fostering an open and welcoming community, we pledge to respect all people who +contribute through reporting issues, posting feature requests, updating +documentation, submitting pull requests or patches, and other activities. 
+ +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, such as physical or electronic + addresses, without explicit permission +* Other unethical or unprofessional conduct + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +By adopting this Code of Conduct, project maintainers commit themselves to +fairly and consistently applying these principles to every aspect of managing +this project. Project maintainers who do not follow or enforce the Code of +Conduct may be permanently removed from the project team. + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +A working group of community members is committed to promptly addressing any +reported issues. The working group is made up of pandas contributors and users. +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the working group by e-mail (pandas-coc@googlegroups.com). +Messages sent to this e-mail address will not be publicly visible but only to +the working group members. 
The working group currently includes

+<ul>
+  {% for person in maintainers.coc %}
+    <li>{{ person }}</li>
+  {% endfor %}
+</ul>
+
+All complaints will be reviewed and investigated and will result in a response
+that is deemed necessary and appropriate to the circumstances. Maintainers are
+obligated to maintain confidentiality with regard to the reporter of an
+incident.
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.3.0, available at
+[http://contributor-covenant.org/version/1/3/0/][version],
+and the [Swift Code of Conduct][swift].
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/3/0/
+[swift]: https://swift.org/community/#code-of-conduct
+
diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md
new file mode 100644
index 0000000000000..af27c31b52d50
--- /dev/null
+++ b/web/pandas/community/ecosystem.md
@@ -0,0 +1,370 @@
+# Pandas ecosystem
+
+Increasingly, packages are being built on top of pandas to address
+specific needs in data preparation, analysis and visualization. This is
+encouraging because it means pandas is not only helping users to handle
+their data tasks but also that it provides a better starting point for
+developers to build powerful and more focused data tools. The creation
+of libraries that complement pandas' functionality also allows pandas
+development to remain focused around its original requirements.
+
+This is an inexhaustive list of projects that build on pandas in order
+to provide tools in the PyData space. For a list of projects that depend
+on pandas, see the [libraries.io usage page for
+pandas](https://libraries.io/pypi/pandas/usage) or [search pypi for
+pandas](https://pypi.org/search/?q=pandas).
+
+We'd like to make it easier for users to find these projects. If you
+know of other substantial projects that you feel should be on this list,
+please let us know.
+ +## Statistics and machine learning + +### [Statsmodels](https://www.statsmodels.org/) + +Statsmodels is the prominent Python "statistics and econometrics +library" and it has a long-standing special relationship with pandas. +Statsmodels provides powerful statistics, econometrics, analysis and +modeling functionality that is out of pandas' scope. Statsmodels +leverages pandas objects as the underlying data container for +computation. + +### [sklearn-pandas](https://github.com/paulgb/sklearn-pandas) + +Use pandas DataFrames in your [scikit-learn](https://scikit-learn.org/) +ML pipeline. + +### [Featuretools](https://github.com/featuretools/featuretools/) + +Featuretools is a Python library for automated feature engineering built +on top of pandas. It excels at transforming temporal and relational +datasets into feature matrices for machine learning using reusable +feature engineering "primitives". Users can contribute their own +primitives in Python and share them with the rest of the community. + +## Visualization + +### [Altair](https://altair-viz.github.io/) + +Altair is a declarative statistical visualization library for Python. +With Altair, you can spend more time understanding your data and its +meaning. Altair's API is simple, friendly and consistent and built on +top of the powerful Vega-Lite JSON specification. This elegant +simplicity produces beautiful and effective visualizations with a +minimal amount of code. Altair works with Pandas DataFrames. + +### [Bokeh](https://bokeh.pydata.org) + +Bokeh is a Python interactive visualization library for large datasets +that natively uses the latest web technologies. Its goal is to provide +elegant, concise construction of novel graphics in the style of +Protovis/D3, while delivering high-performance interactivity over large +data to thin clients. 
+
+[Pandas-Bokeh](https://github.com/PatrikHlobil/Pandas-Bokeh) provides a
+high level API for Bokeh that can be loaded as a native Pandas plotting
+backend via
+
+```
+pd.set_option("plotting.backend", "pandas_bokeh")
+```
+
+It is very similar to the matplotlib plotting backend, but provides
+interactive web-based charts and maps.
+
+### [seaborn](https://seaborn.pydata.org)
+
+Seaborn is a Python visualization library based on
+[matplotlib](https://matplotlib.org). It provides a high-level,
+dataset-oriented interface for creating attractive statistical graphics.
+The plotting functions in seaborn understand pandas objects and leverage
+pandas grouping operations internally to support concise specification
+of complex visualizations. Seaborn also goes beyond matplotlib and
+pandas with the option to perform statistical estimation while plotting,
+aggregating across observations and visualizing the fit of statistical
+models to emphasize patterns in a dataset.
+
+### [yhat/ggpy](https://github.com/yhat/ggpy)
+
+Hadley Wickham\'s [ggplot2](https://ggplot2.tidyverse.org/) is a
+foundational exploratory visualization package for the R language. Based
+on [\"The Grammar of
+Graphics\"](https://www.cs.uic.edu/~wilkinson/TheGrammarOfGraphics/GOG.html)
+it provides a powerful, declarative and extremely general way to
+generate bespoke plots of any kind of data. It\'s really quite
+incredible. Various implementations to other languages are available,
+but a faithful implementation for Python users has long been missing.
+Although still young (as of Jan-2014), the
+[yhat/ggpy](https://github.com/yhat/ggpy) project has been progressing
+quickly in that direction.
+
+### [IPython Vega](https://github.com/vega/ipyvega)
+
+[IPython Vega](https://github.com/vega/ipyvega) leverages
+[Vega](https://github.com/trifacta/vega) to create plots
+within Jupyter Notebook.
+ +### [Plotly](https://plot.ly/python) + +[Plotly's](https://plot.ly/) [Python API](https://plot.ly/python/) +enables interactive figures and web shareability. Maps, 2D, 3D, and +live-streaming graphs are rendered with WebGL and +[D3.js](https://d3js.org/). The library supports plotting directly from +a pandas DataFrame and cloud-based collaboration. Users of [matplotlib, +ggplot for Python, and +Seaborn](https://plot.ly/python/matplotlib-to-plotly-tutorial/) can +convert figures into interactive web-based plots. Plots can be drawn in +[IPython Notebooks](https://plot.ly/ipython-notebooks/) , edited with R +or MATLAB, modified in a GUI, or embedded in apps and dashboards. Plotly +is free for unlimited sharing, and has +[cloud](https://plot.ly/product/plans/), +[offline](https://plot.ly/python/offline/), or +[on-premise](https://plot.ly/product/enterprise/) accounts for private +use. + +### [QtPandas](https://github.com/draperjames/qtpandas) + +Spun off from the main pandas library, the +[qtpandas](https://github.com/draperjames/qtpandas) library enables +DataFrame visualization and manipulation in PyQt4 and PySide +applications. + +## IDE + +### [IPython](https://ipython.org/documentation.html) + +IPython is an interactive command shell and distributed computing +environment. IPython tab completion works with Pandas methods and also +attributes like DataFrame columns. + +### [Jupyter Notebook / Jupyter Lab](https://jupyter.org) + +Jupyter Notebook is a web application for creating Jupyter notebooks. A +Jupyter notebook is a JSON document containing an ordered list of +input/output cells which can contain code, text, mathematics, plots and +rich media. Jupyter notebooks can be converted to a number of open +standard output formats (HTML, HTML presentation slides, LaTeX, PDF, +ReStructuredText, Markdown, Python) through 'Download As' in the web +interface and `jupyter convert` in a shell. 
+
+Pandas DataFrames implement `_repr_html_` and `_repr_latex_` methods which
+are utilized by Jupyter Notebook for displaying (abbreviated) HTML or
+LaTeX tables. LaTeX output is properly escaped. (Note: HTML tables may
+or may not be compatible with non-HTML Jupyter output formats.)
+
+See `Options and Settings <options>` and
+`Available Options <options.available>`
+for pandas `display.` settings.
+
+### [quantopian/qgrid](https://github.com/quantopian/qgrid)
+
+qgrid is \"an interactive grid for sorting and filtering DataFrames in
+IPython Notebook\" built with SlickGrid.
+
+### [Spyder](https://www.spyder-ide.org/)
+
+Spyder is a cross-platform PyQt-based IDE combining the editing,
+analysis, debugging and profiling functionality of a software
+development tool with the data exploration, interactive execution, deep
+inspection and rich visualization capabilities of a scientific
+environment like MATLAB or Rstudio.
+
+Its [Variable
+Explorer](https://docs.spyder-ide.org/variableexplorer.html) allows
+users to view, manipulate and edit pandas `Index`, `Series`, and
+`DataFrame` objects like a \"spreadsheet\", including copying and
+modifying values, sorting, displaying a \"heatmap\", converting data
+types and more. Pandas objects can also be renamed, duplicated, new
+columns added, copied/pasted to/from the clipboard (as TSV), and
+saved/loaded to/from a file. Spyder can also import data from a variety
+of plain text and binary files or the clipboard into a new pandas
+DataFrame via a sophisticated import wizard.
+
+Most pandas classes, methods and data attributes can be autocompleted in
+Spyder\'s [Editor](https://docs.spyder-ide.org/editor.html) and [IPython
+Console](https://docs.spyder-ide.org/ipythonconsole.html), and Spyder\'s
+[Help pane](https://docs.spyder-ide.org/help.html) can retrieve and
+render Numpydoc documentation on pandas objects in rich text with Sphinx
+both automatically and on-demand.
+ +## API + +### [pandas-datareader](https://github.com/pydata/pandas-datareader) + +`pandas-datareader` is a remote data access library for pandas +(PyPI:`pandas-datareader`). It is based on functionality that was +located in `pandas.io.data` and `pandas.io.wb` but was split off in +v0.19. See more in the [pandas-datareader +docs](https://pandas-datareader.readthedocs.io/en/latest/): + +The following data feeds are available: + +- Google Finance +- Tiingo +- Morningstar +- IEX +- Robinhood +- Enigma +- Quandl +- FRED +- Fama/French +- World Bank +- OECD +- Eurostat +- TSP Fund Data +- Nasdaq Trader Symbol Definitions +- Stooq Index Data +- MOEX Data + +### [quandl/Python](https://github.com/quandl/Python) + +Quandl API for Python wraps the Quandl REST API to return Pandas +DataFrames with timeseries indexes. + +### [pydatastream](https://github.com/vfilimonov/pydatastream) + +PyDatastream is a Python interface to the [Thomson Dataworks Enterprise +(DWE/Datastream)](http://dataworks.thomson.com/Dataworks/Enterprise/1.0/) +SOAP API to return indexed Pandas DataFrames with financial data. This +package requires valid credentials for this API (non free). + +### [pandaSDMX](https://pandasdmx.readthedocs.io) + +pandaSDMX is a library to retrieve and acquire statistical data and +metadata disseminated in [SDMX](https://www.sdmx.org) 2.1, an +ISO-standard widely used by institutions such as statistics offices, +central banks, and international organisations. pandaSDMX can expose +datasets and related structural metadata including data flows, +code-lists, and data structure definitions as pandas Series or +MultiIndexed DataFrames. + +### [fredapi](https://github.com/mortada/fredapi) + +fredapi is a Python interface to the [Federal Reserve Economic Data +(FRED)](https://fred.stlouisfed.org/) provided by the Federal Reserve +Bank of St. Louis. It works with both the FRED database and ALFRED +database that contains point-in-time data (i.e. historic data +revisions). 
fredapi provides a wrapper in Python to the FRED HTTP API, +and also provides several convenient methods for parsing and analyzing +point-in-time data from ALFRED. fredapi makes use of pandas and returns +data in a Series or DataFrame. This module requires a FRED API key that +you can obtain for free on the FRED website. + +## Domain specific + +### [Geopandas](https://github.com/kjordahl/geopandas) + +Geopandas extends pandas data objects to include geographic information +which support geometric operations. If your work entails maps and +geographical coordinates, and you love pandas, you should take a close +look at Geopandas. + +### [xarray](https://github.com/pydata/xarray) + +xarray brings the labeled data power of pandas to the physical sciences +by providing N-dimensional variants of the core pandas data structures. +It aims to provide a pandas-like and pandas-compatible toolkit for +analytics on multi-dimensional arrays, rather than the tabular data for +which pandas excels. + +## Out-of-core + +### [Blaze](http://blaze.pydata.org/) + +Blaze provides a standard API for doing computations with various +in-memory and on-disk backends: NumPy, Pandas, SQLAlchemy, MongoDB, +PyTables, PySpark. + +### [Dask](https://dask.readthedocs.io/en/latest/) + +Dask is a flexible parallel computing library for analytics. Dask +provides a familiar `DataFrame` interface for out-of-core, parallel and +distributed computing. + +### [Dask-ML](https://dask-ml.readthedocs.io/en/latest/) + +Dask-ML enables parallel and distributed machine learning using Dask +alongside existing machine learning libraries like Scikit-Learn, +XGBoost, and TensorFlow. + +### [Koalas](https://koalas.readthedocs.io/en/latest/) + +Koalas provides a familiar pandas DataFrame interface on top of Apache +Spark. It enables users to leverage multi-cores on one machine or a +cluster of machines to speed up or scale their DataFrame code. 
+ +### [Odo](http://odo.pydata.org) + +Odo provides a uniform API for moving data between different formats. It +uses pandas own `read_csv` for CSV IO and leverages many existing +packages such as PyTables, h5py, and pymongo to move data between non +pandas formats. Its graph based approach is also extensible by end users +for custom formats that may be too specific for the core of odo. + +### [Ray](https://ray.readthedocs.io/en/latest/pandas_on_ray.html) + +Pandas on Ray is an early stage DataFrame library that wraps Pandas and +transparently distributes the data and computation. The user does not +need to know how many cores their system has, nor do they need to +specify how to distribute the data. In fact, users can continue using +their previous Pandas notebooks while experiencing a considerable +speedup from Pandas on Ray, even on a single machine. Only a +modification of the import statement is needed, as we demonstrate below. +Once you've changed your import statement, you're ready to use Pandas on +Ray just like you would Pandas. + +``` +# import pandas as pd +import ray.dataframe as pd +``` + +### [Vaex](https://docs.vaex.io/) + +Increasingly, packages are being built on top of pandas to address +specific needs in data preparation, analysis and visualization. Vaex is +a python library for Out-of-Core DataFrames (similar to Pandas), to +visualize and explore big tabular datasets. It can calculate statistics +such as mean, sum, count, standard deviation etc, on an N-dimensional +grid up to a billion (10^9^) objects/rows per second. Visualization is +done using histograms, density plots and 3d volume rendering, allowing +interactive exploration of big data. Vaex uses memory mapping, zero +memory copy policy and lazy computations for best performance (no memory +wasted). 
+ +- ``vaex.from_pandas`` +- ``vaex.to_pandas_df`` + +## Data cleaning and validation + +### [pyjanitor](https://github.com/ericmjl/pyjanitor/) + +Pyjanitor provides a clean API for cleaning data, using method chaining. + +### [Engarde](https://engarde.readthedocs.io/en/latest/) + +Engarde is a lightweight library used to explicitly state your +assumptions about your datasets and check that they're *actually* true. + +## Extension data types + +Pandas provides an interface for defining +`extension types <extending.extension-types>` to extend NumPy's type system. The following libraries +implement that interface to provide types not found in NumPy or pandas, +which work well with pandas' data containers. + +### [cyberpandas](https://cyberpandas.readthedocs.io/en/latest) + +Cyberpandas provides an extension type for storing arrays of IP +Addresses. These arrays can be stored inside pandas\' Series and +DataFrame. + +## Accessors + +A directory of projects providing +`extension accessors <extending.register-accessors>`. This is for users to discover new accessors and for library +authors to coordinate on the namespace. + + Library Accessor Classes + ------------------------------------------------------------- ---------- ----------------------- + [cyberpandas](https://cyberpandas.readthedocs.io/en/latest) `ip` `Series` + [pdvega](https://altair-viz.github.io/pdvega/) `vgplot` `Series`, `DataFrame` diff --git a/web/pandas/community/roadmap.md b/web/pandas/community/roadmap.md new file mode 100644 index 0000000000000..8a5c2735b3d93 --- /dev/null +++ b/web/pandas/community/roadmap.md @@ -0,0 +1,195 @@ +# Roadmap + +This page provides an overview of the major themes in pandas' +development. Each of these items requires a relatively large amount of +effort to implement. These may be achieved more quickly with dedicated +funding or interest from contributors. + +An item being on the roadmap does not mean that it will *necessarily* +happen, even with unlimited funding. 
During the implementation period we +may discover issues preventing the adoption of the feature. + +Additionally, an item *not* being on the roadmap does not exclude it +from inclusion in pandas. The roadmap is intended for larger, +fundamental changes to the project that are likely to take months or +years of developer time. Smaller-scoped items will continue to be +tracked on our [issue tracker](https://github.com/pandas-dev/pandas/issues). + +See [Roadmap evolution](#roadmap-evolution) for proposing +changes to this document. + +## Extensibility + +Pandas `extending.extension-types` allow +for extending NumPy types with custom data types and array storage. +Pandas uses extension types internally, and provides an interface for +3rd-party libraries to define their own custom data types. + +Many parts of pandas still unintentionally convert data to a NumPy +array. These problems are especially pronounced for nested data. + +We'd like to improve the handling of extension arrays throughout the +library, making their behavior more consistent with the handling of +NumPy arrays. We'll do this by cleaning up pandas' internals and +adding new methods to the extension array interface. + +## String data type + +Currently, pandas stores text data in an `object` -dtype NumPy array. +The current implementation has two primary drawbacks: First, `object` +-dtype is not specific to strings: any Python object can be stored in an +`object` -dtype array, not just strings. Second: this is not efficient. +The NumPy memory model isn't especially well-suited to variable width +text data. + +To solve the first issue, we propose a new extension type for string +data. This will initially be opt-in, with users explicitly requesting +`dtype="string"`. The array backing this string dtype may initially be +the current implementation: an `object` -dtype NumPy array of Python +strings. 
+ +To solve the second issue (performance), we'll explore alternative +in-memory array libraries (for example, Apache Arrow). As part of the +work, we may need to implement certain operations expected by pandas +users (for example the algorithm used in, `Series.str.upper`). That work +may be done outside of pandas. + +## Apache Arrow interoperability + +[Apache Arrow](https://arrow.apache.org) is a cross-language development +platform for in-memory data. The Arrow logical types are closely aligned +with typical pandas use cases. + +We'd like to provide better-integrated support for Arrow memory and +data types within pandas. This will let us take advantage of its I/O +capabilities and provide for better interoperability with other +languages and libraries using Arrow. + +## Block manager rewrite + +We'd like to replace pandas current internal data structures (a +collection of 1 or 2-D arrays) with a simpler collection of 1-D arrays. + +Pandas internal data model is quite complex. A DataFrame is made up of +one or more 2-dimensional "blocks", with one or more blocks per dtype. +This collection of 2-D arrays is managed by the BlockManager. + +The primary benefit of the BlockManager is improved performance on +certain operations (construction from a 2D array, binary operations, +reductions across the columns), especially for wide DataFrames. However, +the BlockManager substantially increases the complexity and maintenance +burden of pandas. + +By replacing the BlockManager we hope to achieve + +- Substantially simpler code +- Easier extensibility with new logical types +- Better user control over memory use and layout +- Improved micro-performance +- Option to provide a C / Cython API to pandas' internals + +See [these design +documents](https://dev.pandas.io/pandas2/internal-architecture.html#removal-of-blockmanager-new-dataframe-internals) +for more. 
+ +## Decoupling of indexing and internals + +The code for getting and setting values in pandas' data structures +needs refactoring. In particular, we must clearly separate code that +converts keys (e.g., the argument to `DataFrame.loc`) to positions from +code that uses these positions to get or set values. This is related to +the proposed BlockManager rewrite. Currently, the BlockManager sometimes +uses label-based, rather than position-based, indexing. We propose that +it should only work with positional indexing, and the translation of +keys to positions should be entirely done at a higher level. + +Indexing is a complicated API with many subtleties. This refactor will +require care and attention. More details are discussed at +<https://github.com/pandas-dev/pandas/wiki/(Tentative)-rules-for-restructuring-indexing-code> + +## Numba-accelerated operations + +[Numba](https://numba.pydata.org) is a JIT compiler for Python code. +We'd like to provide ways for users to apply their own Numba-jitted +functions where pandas accepts user-defined functions (for example, +`Series.apply`, +`DataFrame.apply`, +`DataFrame.applymap`, and in groupby and +window contexts). This will improve the performance of +user-defined-functions in these operations by staying within compiled +code. + +## Documentation improvements + +We'd like to improve the content, structure, and presentation of the +pandas documentation. Some specific goals include + +- Overhaul the HTML theme with a modern, responsive design + (`15556`) +- Improve the "Getting Started" documentation, designing and writing + learning paths for users different backgrounds (e.g. brand new to + programming, familiar with other languages like R, already familiar + with Python). +- Improve the overall organization of the documentation and specific + subsections of the documentation to make navigation and finding + content easier. 
+ +## Package docstring validation + +To improve the quality and consistency of pandas docstrings, we've +developed tooling to check docstrings in a variety of ways. +<https://github.com/pandas-dev/pandas/blob/master/scripts/validate_docstrings.py> +contains the checks. + +Like many other projects, pandas uses the +[numpydoc](https://numpydoc.readthedocs.io/en/latest/) style for writing +docstrings. With the collaboration of the numpydoc maintainers, we'd +like to move the checks to a package other than pandas so that other +projects can easily use them as well. + +## Performance monitoring + +Pandas uses [airspeed velocity](https://asv.readthedocs.io/en/stable/) +to monitor for performance regressions. ASV itself is a fabulous tool, +but requires some additional work to be integrated into an open source +project's workflow. + +The [asv-runner](https://github.com/asv-runner) organization, currently +made up of pandas maintainers, provides tools built on top of ASV. We +have a physical machine for running a number of project's benchmarks, +and tools managing the benchmark runs and reporting on results. + +We'd like to fund improvements and maintenance of these tools to + +- Be more stable. Currently, they're maintained on the nights and + weekends when a maintainer has free time. +- Tune the system for benchmarks to improve stability, following + <https://pyperf.readthedocs.io/en/latest/system.html> +- Build a GitHub bot to request ASV runs *before* a PR is merged. + Currently, the benchmarks are only run nightly. + +## Roadmap Evolution + +Pandas continues to evolve. The direction is primarily determined by +community interest. Everyone is welcome to review existing items on the +roadmap and to propose a new item. + +Each item on the roadmap should be a short summary of a larger design +proposal. The proposal should include + +1. Short summary of the changes, which would be appropriate for + inclusion in the roadmap if accepted. +2. Motivation for the changes. +3. 
An explanation of why the change is in scope for pandas. +4. Detailed design: Preferably with example-usage (even if not + implemented yet) and API documentation +5. API Change: Any API changes that may result from the proposal. + +That proposal may then be submitted as a GitHub issue, where the pandas +maintainers can review and comment on the design. The [pandas mailing +list](https://mail.python.org/mailman/listinfo/pandas-dev) should be +notified of the proposal. + +When there's agreement that an implementation would be welcome, the +roadmap should be updated to include the summary and a link to the +discussion issue. diff --git a/web/pandas/community/team.md b/web/pandas/community/team.md new file mode 100644 index 0000000000000..c0a15081e1fa8 --- /dev/null +++ b/web/pandas/community/team.md @@ -0,0 +1,101 @@ +# Team + +## Contributors + +_pandas_ is made with love by more than [1,500 volunteer contributors](https://github.com/pandas-dev/pandas/graphs/contributors). + +If you want to support pandas development, you can find information in the [donations page](../donate.html). + +## Maintainers + +<div class="row maintainers"> + {% for row in maintainers.people | batch(6, "") %} + <div class="card-group maintainers"> + {% for person in row %} + {% if person %} + <div class="card"> + <img class="card-img-top" alt="" src="{{ person.avatar_url }}"/> + <div class="card-body"> + <h6 class="card-title"> + {% if person.blog %} + <a href="{{ person.blog }}"> + {{ person.name or person.login }} + </a> + {% else %} + {{ person.name or person.login }} + {% endif %} + </h6> + <p class="card-text small"><a href="{{ person.html_url }}">{{ person.login }}</a></p> + </div> + </div> + {% else %} + <div class="card border-0"></div> + {% endif %} + {% endfor %} + </div> + {% endfor %} +</div> + +## BDFL + +Wes McKinney is the Benevolent Dictator for Life (BDFL). 
+ +## Governance + +The project governance is available in the [project governance documents](https://github.com/pandas-dev/pandas-governance). + +## NumFOCUS + +![](https://numfocus.org/wp-content/uploads/2018/01/optNumFocus_LRG.png) + +_pandas_ is a Sponsored Project of [NumFOCUS](https://numfocus.org/), a 501(c)(3) nonprofit charity in the United States. +NumFOCUS provides _pandas_ with fiscal, legal, and administrative support to help ensure the +health and sustainability of the project. Visit numfocus.org for more information. + +Donations to _pandas_ are managed by NumFOCUS. For donors in the United States, your gift is tax-deductible +to the extent provided by law. As with any donation, you should consult with your tax adviser about your particular tax situation. + +## Code of conduct committee + +<ul> + {% for person in maintainers.coc %} + <li>{{ person }}</li> + {% endfor %} +</ul> + +## NumFOCUS committee + +<ul> + {% for person in maintainers.numfocus %} + <li>{{ person }}</li> + {% endfor %} +</ul> + +## Institutional partners + +<ul> + {% for company in partners.active if company.employs %} + <li><a href="{{ company.url }}">{{ company.name }}</a> ({{ company.employs }})</li> + {% endfor %} +</ul> + +In-kind sponsors + +- [Indeed](https://opensource.indeedeng.io/): Logo and website design +- Can we find a donor for the hosting (website, benchmarks,...?) 
+ +## Emeritus maintainers + +<ul> + {% for person in maintainers.emeritus %} + <li>{{ person }}</li> + {% endfor %} +</ul> + +## Past institutional partners + +<ul> + {% for company in partners.past %} + <li><a href="{{ company.url }}">{{ company.name }}</a></li> + {% endfor %} +</ul> diff --git a/web/pandas/config.yml b/web/pandas/config.yml new file mode 100644 index 0000000000000..ba979e220f3bd --- /dev/null +++ b/web/pandas/config.yml @@ -0,0 +1,129 @@ +main: + templates_path: _templates + base_template: "layout.html" + ignore: + - _templates/layout.html + - config.yml + - blog.html # blog will be added at a later stage + - try.md # the binder page will be added later + github_repo_url: pandas-dev/pandas + context_preprocessors: + - pandas_web.Preprocessors.navbar_add_info + # - pandas_web.Preprocessors.blog_add_posts + - pandas_web.Preprocessors.maintainers_add_info + - pandas_web.Preprocessors.home_add_releases + markdown_extensions: + - toc + - tables + - fenced_code +static: + logo: # path to the logo when it's in the repo + css: + - /static/css/pandas.css +navbar: + - name: "Install" + target: /install.html + - name: "Documentation" + target: + - name: "Getting started" + target: https://pandas.pydata.org/pandas-docs/stable/getting_started/index.html + - name: "User guide" + target: https://pandas.pydata.org/pandas-docs/stable/user_guide/index.html + - name: "API reference" + target: https://pandas.pydata.org/pandas-docs/stable/reference/index.html + - name: "Contributing to pandas" + target: https://pandas.pydata.org/pandas-docs/stable/development/index.html + - name: "Release notes" + target: https://pandas.pydata.org/pandas-docs/stable/whatsnew/index.html + - name: "Community" + target: + - name: "About pandas" + target: /community/about.html + - name: "Project roadmap" + target: /community/roadmap.html + - name: "Ecosystem" + target: /community/ecosystem.html + - name: "Ask a question (StackOverflow)" + target: 
https://stackoverflow.com/questions/tagged/pandas + - name: "Discuss (mailing list)" + target: https://groups.google.com/forum/#!forum/pydata + - name: "Team" + target: /community/team.html + - name: "Code of Conduct" + target: /community/coc.html + - name: "Citing pandas" + target: /community/citing.html + # - name: "Blog" + # target: /blog.html + - name: "Donate" + target: /donate.html +blog: + num_posts: 8 + feed: + - https://wesmckinney.com/feeds/pandas.atom.xml + - https://tomaugspurger.github.io/feed + - https://jorisvandenbossche.github.io/feeds/all.atom.xml + - https://datapythonista.github.io/blog/feeds/pandas.atom.xml + - https://numfocus.org/tag/pandas/feed/ +maintainers: + active: + - wesm + - jorisvandenbossche + - TomAugspurger + - shoyer + - jreback + - chris-b1 + - sinhrks + - cpcloud + - gfyoung + - toobaz + - WillAyd + - mroeschke + - jschendel + - jbrockmendel + - datapythonista + - simonjayhawkins + - topper-123 + emeritus: + - Wouter Overmeire + - Skipper Seabold + - Jeff Tratner + coc: + - Safia Abdalla + - Tom Augspurger + - Joris Van den Bossche + - Camille Scott + - Nathaniel Smith + numfocus: + - Phillip Cloud + - Stephan Hoyer + - Wes McKinney + - Jeff Reback + - Joris Van den Bossche +partners: + active: + - name: "NumFOCUS" + url: https://numfocus.org/ + logo: /static/img/partners/numfocus.svg + - name: "Anaconda" + url: https://www.anaconda.com/ + logo: /static/img/partners/anaconda.svg + employs: "Tom Augspurger, Brock Mendel" + - name: "Two Sigma" + url: https://www.twosigma.com/ + logo: /static/img/partners/two_sigma.svg + employs: "Phillip Cloud, Jeff Reback" + - name: "RStudio" + url: https://www.rstudio.com/ + logo: /static/img/partners/r_studio.svg + employs: "Wes McKinney" + - name: "Ursa Labs" + url: https://ursalabs.org/ + logo: /static/img/partners/ursa_labs.svg + employs: "Wes McKinney, Joris Van den Bossche" + - name: "Tidelift" + url: https://tidelift.com + logo: /static/img/partners/tidelift.svg + past: + - name: 
"Paris-Saclay Center for Data Science" + url: https://www.datascience-paris-saclay.fr/ diff --git a/web/pandas/donate.md b/web/pandas/donate.md new file mode 100644 index 0000000000000..5badb4c5a2031 --- /dev/null +++ b/web/pandas/donate.md @@ -0,0 +1,25 @@ +# Donate to pandas + +_pandas_ is and always will be **free**. To make the development sustainable, we need _pandas_ users, corporate +or individual, to support the development by providing their time and money. + +You can find more information about current developers and supporters in the [team page](community/team.html). +Financial contributions will mainly be used to advance the [pandas roadmap](community/roadmap.html). + +- If your **company or organization** is interested in helping make pandas better, please contact us at [info@numfocus.org](mailto:info@numfocus.org) +- If you want to contribute to _pandas_ with your **time**, please visit the [contributing page](https://pandas.pydata.org/pandas-docs/stable/development/index.html) +- If you want to support _pandas_ with a **donation**, please use the form below: + + +<div id="salsalabs-donate-container"> +</div> +<script type="text/javascript" + src="https://default.salsalabs.org/api/widget/template/4ba4e328-1855-47c8-9a89-63e4757d2151/?tId=salsalabs-donate-container"> +</script> + +_pandas_ is a Sponsored Project of [NumFOCUS](https://numfocus.org/), a 501(c)(3) nonprofit charity in the United States. +NumFOCUS provides _pandas_ with fiscal, legal, and administrative support to help ensure the +health and sustainability of the project. Visit numfocus.org for more information. + +Donations to _pandas_ are managed by NumFOCUS. For donors in the United States, your gift is tax-deductible +to the extent provided by law. As with any donation, you should consult with your tax adviser about your particular tax situation. 
diff --git a/web/pandas/index.html b/web/pandas/index.html new file mode 100644 index 0000000000000..696f0862aa109 --- /dev/null +++ b/web/pandas/index.html @@ -0,0 +1,114 @@ +{% extends "layout.html" %} +{% block body %} + <div class="container"> + <div class="row"> + <div class="col-md-9"> + <section class="jumbotron text-center"> + <h1>pandas</h1> + <p> + <strong>pandas</strong> is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool,<br/> + built on top of the <a href="http://www.python.org">Python</a> programming language. + </p> + <p> + <a class="btn btn-primary" href="{{ base_url }}/install.html">Install pandas now!</a> + </p> + </section> + + <div class="row"> + <div class="col-md-4"> + <h5>Getting started</h5> + <ul> + <!-- <li><a href="{{ base_url }}/try.html">Try pandas online</a></li> --> + <li><a href="{{ base_url }}/install.html">Install pandas</a></li> + <li><a href="https://pandas.pydata.org/pandas-docs/stable/getting_started/index.html">Getting started</a></li> + </ul> + </div> + <div class="col-md-4"> + <h5>Documentation</h5> + <ul> + <li><a href="https://pandas.pydata.org/pandas-docs/stable/user_guide/index.html">User guide</a></li> + <li><a href="https://pandas.pydata.org/pandas-docs/stable/reference/index.html">API reference</a></li> + <li><a href="https://pandas.pydata.org/pandas-docs/stable/development/index.html">Contributing to pandas</a></li> + <li><a href="https://pandas.pydata.org/pandas-docs/stable/whatsnew/index.html">Release notes</a></li> + </ul> + </div> + <div class="col-md-4"> + <h5>Community</h5> + <ul> + <li><a href="{{ base_url }}/community/about.html">About pandas</a></li> + <li><a href="https://stackoverflow.com/questions/tagged/pandas">Ask a question</a></li> + <li><a href="{{ base_url }}/community/ecosystem.html">Ecosystem</a></li> + </ul> + </div> + </div> + <section> + <h5>With the support of:</h5> + <div class="row h-100"> + {% for company in partners.active %} + <div class="col-sm-6 
col-md-2 my-auto"> + <a href="{{ company.url }}" target="_blank"> + <img class="img-fluid" alt="{{ company.name }}" src="{{ base_url }}{{ company.logo }}"/> + </a> + </div> + {% endfor %} + </div> + </section> + </div> + <div class="col-md-3"> + {% if releases %} + <h4>Latest version: {{ releases[0].name }}</h4> + <ul> + <li><a href="https://pandas.pydata.org/pandas-docs/stable/whatsnew/v0.25.0.html">What's new in {{ releases[0].name }}</a></li> + <li>Release date:<br/>{{ releases[0].published.strftime("%b %d, %Y") }}</li> + <li><a href="https://pandas.pydata.org/pandas-docs/stable/">Documentation (web)</a></li> + <li><a href="https://pandas.pydata.org/pandas-docs/stable/pandas.pdf">Documentation (pdf)</a></li> + <li><a href="{{ releases[0].url }}">Download source code</a></li> + </ul> + {% endif %} + <h4>Follow us</h4> + <div class="text-center"> + <p> + <a href="https://twitter.com/pandas_dev?ref_src=twsrc%5Etfw" class="twitter-follow-button" data-show-count="false">Follow @pandas_dev</a><script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> + </p> + </div> + <h4>Get the book</h4> + <p class="book"> + <a href="https://amzn.to/2KI5JJw"> + <img class="img-fluid" alt="Python for Data Analysis" src="{{ base_url }}/static/img/pydata_book.gif"/> + </a> + </p> + {% if releases[1:5] %} + <h4>Previous versions</h4> + <ul> + {% for release in releases[1:5] %} + <li class="small"> + {{ release.name }} ({{ release.published.strftime("%b %d, %Y") }})<br/> + <a href="https://pandas.pydata.org/pandas-docs/stable/whatsnew/{{ release.tag }}.html">changelog</a> | + <a href="https://pandas.pydata.org/pandas-docs/version/{{ release.name }}/">docs</a> | + <a href="https://pandas.pydata.org/pandas-docs/version/{{ release.name }}/pandas.pdf">pdf</a> | + <a href="{{ release.url }}">code</a> + </li> + {% endfor %} + </ul> + {% endif %} + {% if releases[5:] %} + <p class="text-center"> + <a data-toggle="collapse" href="#show-more-releases" role="button" 
aria-expanded="false" aria-controls="show-more-releases">Show more</a> + </p> + <ul id="show-more-releases" class="collapse"> + {% for release in releases[5:] %} + <li class="small"> + {{ release.name }} ({{ release.published.strftime("%Y-%m-%d") }})<br/> + <a href="https://pandas.pydata.org/pandas-docs/stable/whatsnew/{{ release.tag }}.html">changelog</a> | + <a href="https://pandas.pydata.org/pandas-docs/version/{{ release.name }}/">docs</a> | + <a href="https://pandas.pydata.org/pandas-docs/version/{{ release.name }}/pandas.pdf">pdf</a> | + <a href="{{ release.url }}">code</a> + </li> + {% endfor %} + </ul> + {% endif %} + </div> + </div> + </div> + +{% endblock %} diff --git a/web/pandas/install.md b/web/pandas/install.md new file mode 100644 index 0000000000000..c6cccd803e33e --- /dev/null +++ b/web/pandas/install.md @@ -0,0 +1,28 @@ +# Installation instructions + +The next steps provide the easiest and recommended way to set up your +environment to use pandas. Other installation options can be found in +the [advanced installation page](https://pandas.pydata.org/pandas-docs/stable/install.html). + +1. Download [Anaconda](https://www.anaconda.com/distribution/) for your operating system and + the latest Python version, run the installer, and follow the steps. Detailed instructions + on how to install Anaconda can be found in the + [Anaconda documentation](https://docs.anaconda.com/anaconda/install/). + +2. In the Anaconda prompt (or terminal in Linux or MacOS), start JupyterLab: + + <img class="img-fluid" alt="" src="{{ base_url }}/static/img/install/anaconda_prompt.png"/> + +3. In JupyterLab, create a new (Python 3) notebook: + + <img class="img-fluid" alt="" src="{{ base_url }}/static/img/install/jupyterlab_home.png"/> + +4. In the first cell of the notebook, you can import pandas and check the version with: + + <img class="img-fluid" alt="" src="{{ base_url }}/static/img/install/pandas_import_and_version.png"/> + +5. 
Now you are ready to use pandas, and you can write your code in the next cells. + + +You can learn more about pandas in the [tutorials](#), and more about JupyterLab +in the [JupyterLab documentation](https://jupyterlab.readthedocs.io/en/stable/user/interface.html). diff --git a/web/pandas/static/css/pandas.css b/web/pandas/static/css/pandas.css new file mode 100644 index 0000000000000..5911de96b5fa9 --- /dev/null +++ b/web/pandas/static/css/pandas.css @@ -0,0 +1,16 @@ +body { + padding-top: 5em; + padding-bottom: 3em; +} +code { + white-space: pre; +} +a.navbar-brand img { + max-height: 2em; +} +div.card { + margin: 0 0 .2em .2em !important; +} +.book { + padding: 0 20%; +} diff --git a/web/pandas/static/img/install/anaconda_prompt.png b/web/pandas/static/img/install/anaconda_prompt.png new file mode 100644 index 0000000000000..7b547e4ebb02a Binary files /dev/null and b/web/pandas/static/img/install/anaconda_prompt.png differ diff --git a/web/pandas/static/img/install/jupyterlab_home.png b/web/pandas/static/img/install/jupyterlab_home.png new file mode 100644 index 0000000000000..c62d33a5e0fc6 Binary files /dev/null and b/web/pandas/static/img/install/jupyterlab_home.png differ diff --git a/web/pandas/static/img/install/pandas_import_and_version.png b/web/pandas/static/img/install/pandas_import_and_version.png new file mode 100644 index 0000000000000..64c1303ac495c Binary files /dev/null and b/web/pandas/static/img/install/pandas_import_and_version.png differ diff --git a/web/pandas/static/img/pandas.svg b/web/pandas/static/img/pandas.svg new file mode 120000 index 0000000000000..2e5d3872e4845 --- /dev/null +++ b/web/pandas/static/img/pandas.svg @@ -0,0 +1 @@ +../../../../doc/logo/pandas_logo.svg \ No newline at end of file diff --git a/web/pandas/static/img/partners/anaconda.svg b/web/pandas/static/img/partners/anaconda.svg new file mode 100644 index 0000000000000..fcddf72ebaa28 --- /dev/null +++ b/web/pandas/static/img/partners/anaconda.svg @@ -0,0 +1,99 @@ +<?xml 
version="1.0" encoding="UTF-8" standalone="no"?> +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + viewBox="0 0 530.44 90.053329" + height="90.053329" + width="530.44" + xml:space="preserve" + id="svg2" + version="1.1"><metadata + id="metadata8"><rdf:RDF><cc:Work + rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><defs + id="defs6" /><g + transform="matrix(1.3333333,0,0,-1.3333333,0,90.053333)" + id="g10"><g + transform="scale(0.1)" + id="g12"><path + id="path14" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 958.313,274.5 53.637,120.406 h 1.64 L 1068.32,274.5 Z m 67.867,251.754 c -1.65,3.285 -3.83,6.027 -9.31,6.027 h -5.47 c -4.93,0 -7.66,-2.742 -9.31,-6.027 L 831.887,157.93 c -3.282,-7.117 1.097,-14.231 9.304,-14.231 h 47.618 c 8.754,0 13.679,5.473 15.867,10.942 l 26.82,59.113 h 163.644 l 26.81,-59.113 c 3.83,-7.657 7.66,-10.942 15.88,-10.942 h 47.61 c 8.21,0 12.59,7.114 9.3,14.231 l -168.56,368.324" /><path + id="path16" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 1547.94,526.801 h -50.35 c -6.03,0 -10.4,-4.922 -10.4,-10.395 V 290.371 h -0.55 l -227.67,241.91 h -13.68 c -5.48,0 -10.4,-4.383 -10.4,-9.855 V 154.102 c 0,-5.481 4.92,-10.403 10.4,-10.403 h 49.8 c 6.02,0 10.4,4.922 10.4,10.403 v 235.332 h 0.54 L 1534.8,138.227 h 13.14 c 5.47,0 10.4,4.378 10.4,9.847 v 368.332 c 0,5.473 -4.93,10.395 -10.4,10.395" /><path + id="path18" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 1725.97,274.5 53.64,120.406 h 1.64 L 1835.98,274.5 Z m 67.87,251.754 c -1.64,3.285 -3.83,6.027 -9.31,6.027 h -5.47 c -4.93,0 -7.66,-2.742 -9.31,-6.027 L 1599.55,157.93 c -3.29,-7.117 1.09,-14.231 9.3,-14.231 h 47.62 c 8.75,0 
13.68,5.473 15.87,10.942 l 26.82,59.113 h 163.64 l 26.81,-59.113 c 3.83,-7.657 7.67,-10.942 15.88,-10.942 h 47.61 c 8.21,0 12.59,7.114 9.3,14.231 l -168.56,368.324" /><path + id="path20" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 2261.6,241.117 c -3.29,3.285 -9.31,3.836 -13.69,0 -22.98,-18.605 -50.9,-31.191 -83.73,-31.191 -70.06,0 -122.6,58.008 -122.6,126.418 0,68.965 51.99,127.519 122.05,127.519 30.64,0 61.3,-12.039 84.28,-32.285 4.38,-4.379 9.85,-4.379 13.69,0 l 33.38,34.477 c 4.38,4.375 4.38,10.941 -0.55,15.328 -37.21,33.383 -77.17,50.898 -132.45,50.898 -109.45,0 -197.57,-88.117 -197.57,-197.574 0,-109.465 88.12,-196.48 197.57,-196.48 48.72,0 95.78,16.964 133,53.086 3.83,3.835 4.92,10.949 0.55,14.777 l -33.93,35.027" /><path + id="path22" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 2520.21,209.379 c -68.95,0 -125.33,56.371 -125.33,125.328 0,68.957 56.38,126.426 125.33,126.426 68.96,0 125.88,-57.469 125.88,-126.426 0,-68.957 -56.92,-125.328 -125.88,-125.328 z m 0,322.902 c -109.46,0 -196.48,-88.117 -196.48,-197.574 0,-109.465 87.02,-196.48 196.48,-196.48 109.46,0 197.03,87.015 197.03,196.48 0,109.457 -87.57,197.574 -197.03,197.574" /><path + id="path24" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 3090.17,526.801 h -50.35 c -6.02,0 -10.4,-4.922 -10.4,-10.395 V 290.371 h -0.54 l -227.68,241.91 h -13.68 c -5.47,0 -10.4,-4.383 -10.4,-9.855 V 154.102 c 0,-5.481 4.93,-10.403 10.4,-10.403 h 49.8 c 6.02,0 10.4,4.922 10.4,10.403 v 235.332 h 0.55 l 228.77,-251.207 h 13.13 c 5.47,0 10.4,4.378 10.4,9.847 v 368.332 c 0,5.473 -4.93,10.395 -10.4,10.395" /><path + id="path26" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 3303.16,210.465 h -62.39 v 250.121 h 62.39 c 71.15,0 123.14,-53.641 123.14,-124.785 0,-71.696 -51.99,-125.336 -123.14,-125.336 z m 6.57,316.336 h -129.71 c -5.47,0 -9.85,-4.922 -9.85,-10.395 V 154.102 c 0,-5.481 4.38,-10.403 9.85,-10.403 h 
129.71 c 105.63,0 192.1,85.926 192.1,192.102 0,105.082 -86.47,191 -192.1,191" /><path + id="path28" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 3631.32,274.5 53.64,120.406 h 1.64 L 3741.33,274.5 Z m 236.43,-116.57 -168.57,368.324 c -1.64,3.285 -3.82,6.027 -9.29,6.027 h -5.48 c -4.93,0 -7.67,-2.742 -9.3,-6.027 L 3504.9,157.93 c -3.29,-7.117 1.09,-14.231 9.3,-14.231 h 47.62 c 8.76,0 13.68,5.473 15.87,10.942 l 26.82,59.113 h 163.63 l 26.83,-59.113 c 3.82,-7.657 7.66,-10.942 15.86,-10.942 h 47.62 c 8.21,0 12.59,7.114 9.3,14.231" /><path + id="path30" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 3940.9,176.27 h 7.99 c 2.7,0 4.5,-1.793 4.5,-4.403 0,-2.422 -1.8,-4.394 -4.5,-4.394 h -7.99 z m -4.85,-26.582 h 3.33 c 0.99,0 1.7,0.808 1.7,1.707 v 10.148 h 5.57 l 4.49,-10.598 c 0.27,-0.629 0.9,-1.257 1.62,-1.257 h 4.04 c 1.26,0 2.16,1.257 1.53,2.425 -1.53,3.235 -3.15,6.645 -4.76,9.969 2.69,0.984 6.82,3.5 6.82,9.879 0,6.824 -5.48,10.594 -11.04,10.594 h -13.3 c -0.98,0 -1.7,-0.809 -1.7,-1.703 v -29.457 c 0,-0.899 0.72,-1.707 1.7,-1.707" /><path + id="path32" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 3945.93,192.078 c 14.46,0 26.05,-11.586 26.05,-26.043 0,-14.371 -11.59,-26.047 -26.05,-26.047 -14.37,0 -26.04,11.676 -26.04,26.047 0,14.457 11.67,26.043 26.04,26.043 z m 0,-58.285 c 17.79,0 32.33,14.461 32.33,32.242 0,17.781 -14.54,32.328 -32.33,32.328 -17.78,0 -32.24,-14.547 -32.24,-32.328 0,-17.781 14.46,-32.242 32.24,-32.242" /><path + id="path34" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 125.527,158.422 0.051,2.484 c 0.414,19.649 1.977,39.149 4.684,57.961 l 0.254,1.77 -1.668,0.679 c -17.871,7.305 -35.4574,15.782 -52.2699,25.219 l -2.1172,1.184 -1.0742,-2.16 C 62.3164,223.238 52.9844,199.707 45.6836,175.602 l -0.7031,-2.254 2.2812,-0.629 C 72.0234,165.91 97.5195,161.184 123.051,158.66 l 2.476,-0.238" /><path + id="path36" + 
style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 177.781,500.941 c 0.032,0.196 0.063,0.395 0.094,0.59 -14.668,-0.258 -29.324,-1.265 -43.926,-2.965 1.891,-14.777 4.481,-29.437 7.828,-43.925 10.02,16.949 22.121,32.511 36.004,46.3" /><path + id="path38" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 125.527,140.855 -0.039,2.051 -2.043,0.199 c -21.406,2.02 -43.2223,5.661 -64.8278,10.821 l -5.668,1.355 3.211,-4.855 C 75.5742,121.098 99.3125,95.0195 126.73,72.9258 l 4.43,-3.5899 -0.719,5.668 c -2.906,22.6719 -4.554,44.8321 -4.914,65.8511" /><path + id="path40" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 230.566,657.227 c -26.32,-9.008 -51.164,-21.161 -74.101,-36.036 17.359,-3.07 34.469,-7.097 51.273,-12.027 6.696,16.375 14.297,32.426 22.828,48.063" /><path + id="path42" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 339.918,675.43 c -13.023,0 -25.848,-0.813 -38.488,-2.25 17.925,-12.489 35.066,-26.145 51.238,-41.051 l 13.43,-12.391 -13.168,-12.672 c -10.899,-10.488 -21.559,-21.898 -31.688,-33.918 l -0.512,-0.585 c -0.117,-0.125 -2.003,-2.219 -5.152,-6.055 8,0.84 16.117,1.293 24.34,1.293 127.07,0 230.086,-103.016 230.086,-230.086 0,-127.074 -103.016,-230.086 -230.086,-230.086 -44.094,0 -85.277,12.426 -120.277,33.934 -17.27,-1.918 -34.629,-2.922 -52.012,-2.922 -8.074,0 -16.152,0.211 -24.227,0.629 0.524,-26.172 3.016,-53.3052 7.477,-81.438 C 204.82,21.3242 269.879,0 339.918,0 c 186.516,0 337.715,151.199 337.715,337.715 0,186.512 -151.199,337.715 -337.715,337.715" /><path + id="path44" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 295.145,595.602 c 6.726,7.968 13.671,15.695 20.765,23.101 -15.824,13.469 -32.531,25.758 -50.004,36.856 -10.742,-18.161 -20.09,-36.977 -28.093,-56.282 15.195,-5.574 30.066,-11.953 44.589,-19.031 6.711,8.617 11.399,13.883 12.743,15.356" /><path + id="path46" + 
style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 65.9219,402.934 1.289,-2.09 2.0118,1.433 c 15.6289,11.235 32.0823,21.594 48.9103,30.789 l 1.582,0.864 -0.449,1.738 c -5.028,19.227 -8.868,39.055 -11.414,58.941 l -0.305,2.399 -2.387,-0.434 C 80.168,492.027 55.4609,485.344 31.7383,476.703 l -2.2227,-0.816 0.8789,-2.188 c 9.7422,-24.562 21.6914,-48.363 35.5274,-70.765" /><path + id="path48" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="M 62.0469,370.18 60.125,368.629 C 41.9492,353.844 24.7266,337.414 8.93359,319.797 L 7.375,318.066 9.13281,316.531 C 26.6641,301.188 45.5547,287.094 65.2734,274.645 l 2.0274,-1.293 1.2031,2.097 c 8.8828,15.781 18.8945,31.356 29.7695,46.278 l 1.0938,1.503 -1.2383,1.383 c -12.3281,13.746 -23.9883,28.395 -34.668,43.547 l -1.414,2.02" /><path + id="path50" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 194.48,157.273 5.868,0.348 -4.559,3.723 c -17.976,14.715 -33.625,32.09 -46.453,51.656 l -0.106,0.621 -3.75,1.649 -0.433,-3.184 c -2.262,-16.856 -3.586,-34.566 -3.945,-52.625 l -0.039,-2.215 2.207,-0.129 c 8.003,-0.429 16.078,-0.644 24.171,-0.644 9.004,0 18.032,0.269 27.039,0.8" /><path + id="path52" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 183.219,530.238 c 3.633,16.649 8.109,33.121 13.511,49.317 -21.125,6.078 -42.769,10.617 -64.789,13.523 -1.867,-22.047 -2.082,-44.082 -0.707,-65.941 17.278,1.988 34.629,3.011 51.985,3.101" /><path + id="path54" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 215.813,531.414 c 14.707,9.441 30.539,17.266 47.281,23.195 -11.875,5.59 -24,10.661 -36.348,15.184 -4.219,-12.633 -7.863,-25.441 -10.933,-38.379" /><path + id="path56" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 58.6914,257.121 -1.7773,1.113 C 39.4922,269.16 22.6055,281.363 6.74609,294.496 l -4.51953,3.742 0.76953,-5.812 C 7.30078,260.039 16.2734,228.496 29.6406,198.684 l 2.3672,-5.278 
1.9024,5.465 c 6.6406,19.125 14.6601,38.102 23.8281,56.387 l 0.9531,1.863" /><path + id="path58" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="M 102.133,577.48 C 81.9766,557.492 64.3555,534.969 49.7266,510.445 c 17.4804,5.215 35.1836,9.371 53.0194,12.528 -1.23,18.082 -1.465,36.273 -0.613,54.507" /><path + id="path60" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 112.121,340.762 0.234,5.824 c 0.79,20.598 4.309,40.855 10.461,60.195 l 1.793,5.653 -5.129,-2.961 c -13.152,-7.59 -26.1792,-16.012 -38.7222,-25.047 l -1.8281,-1.328 1.293,-1.86 c 8.6992,-12.406 18.1562,-24.535 28.0973,-36.062 l 3.801,-4.414" /><path + id="path62" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 114.383,305.906 -0.805,5.707 -3.34,-4.691 C 100.836,293.727 92.082,279.945 84.2227,265.961 l -1.1133,-1.992 1.9922,-1.133 c 14.1562,-7.984 29.0114,-15.305 44.1564,-21.762 l 5.402,-2.316 -2.406,5.363 c -8.863,19.668 -14.875,40.453 -17.871,61.785" /><path + id="path64" + style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" + d="m 48.6602,386.676 1.5976,1.273 -1.0781,1.735 c -10.5859,16.918 -20.1836,34.707 -28.5469,52.867 l -2.457,5.355 -1.8125,-5.605 C 6.51172,411.789 1.05859,379.887 0.160156,347.473 L 0,341.523 4.10938,345.82 c 14.01172,14.598 28.99612,28.34 44.55082,40.856" /></g></g></svg> \ No newline at end of file diff --git a/web/pandas/static/img/partners/numfocus.svg b/web/pandas/static/img/partners/numfocus.svg new file mode 100644 index 0000000000000..fcdd87b41e475 --- /dev/null +++ b/web/pandas/static/img/partners/numfocus.svg @@ -0,0 +1,60 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- Generator: Adobe Illustrator 19.0.1, SVG Export Plug-In . 
SVG Version: 6.00 Build 0) --> +<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 432 135.7" style="enable-background:new 0 0 432 135.7;" xml:space="preserve"> +<style type="text/css"> + .st0{fill:#F1563F;} + .st1{fill:#008896;} +</style> +<g> + <g> + <g> + <path class="st0" d="M97.9,12.2v51.9c0,12.7-6.8,19.7-19.1,19.7c-12.2,0-19-7-19-19.7V12.2h5v51.9c0,9.8,4.8,14.9,14,14.9 c9.2,0,14.1-5.2,14.1-14.9V12.2H97.9z"/> + </g> + <g> + <path class="st1" d="M329.8,29.8c0-0.3,0.1-0.7,0.1-1c0-8.3-6.9-16.7-20.1-16.7c-13.1,0-20.6,7.7-20.6,21.2v29.5 c0,13.5,7.4,21.2,20.3,21.2c13.4,0,20.4-8.4,20.4-16.7c0-0.3,0-0.8-0.1-1.4l-7.8,0c-0.7,4.6-1.7,10.4-12,10.4 c-9,0-13-4.1-13-13.4V33.3c0-9.2,4-13.4,12.7-13.4c7.7,0,11.8,3.4,12.2,10.1L329.8,29.8z"/> + </g> + <g> + <path class="st1" d="M376.2,12.4v50.3c0,13.6-7.3,21.2-20.5,21.2c-13.2,0-20.4-7.5-20.4-21.2V12.4h7.9v50.3 c0,9,4.1,13.4,12.5,13.4c8.4,0,12.6-4.5,12.6-13.4V12.4H376.2z"/> + </g> + <g> + <path class="st1" d="M414.9,22.6c-2-1-6-2.9-11.3-2.9c-8.4,0-12.6,3.4-12.6,10c0,7.1,4.8,9.1,12.5,11.8 c8.3,2.9,18.6,6.5,18.6,21.9c0,13-7.5,20.5-20.6,20.5c-8.2,0-14.2-2.7-17.3-6c-1.2-1.3-0.5-0.6-1.2-1.6l5.3-5.1 c1.9,2.2,5,5,12.8,5c8.7,0,13.2-4.1,13.2-12.3c0-9.9-6.6-12.3-14.3-15.1C392,46,383,42.8,383,30.1c0-11.3,7.7-18.1,20.6-18.1 c5.5,0,12.5,1.3,15.4,4.1L414.9,22.6z"/> + </g> + <g> + <path class="st1" d="M283.5,47.2c0-21.2-17.2-38.5-38.5-38.5c-21.2,0-38.5,17.2-38.5,38.5c0,21.2,17.2,38.5,38.5,38.5 C266.2,85.6,283.5,68.4,283.5,47.2z M213.1,47.2c0-17.6,14.3-31.9,31.9-31.9c17.6,0,31.9,14.3,31.9,31.9 c0,17.6-14.3,31.9-31.9,31.9C227.4,79.1,213.1,64.8,213.1,47.2z"/> + </g> + <g> + <path class="st0" d="M233.9,32.3c1.2,0,2.1-1,2.1-2.3c0-1.3-0.9-2.3-2.1-2.3h-7.3c-1.3,0-2.3,1-2.3,2.2v34.5c0,1.2,1,2.2,2.3,2.2 h7.3c1.2,0,2.1-1,2.1-2.3c0-1.3-1-2.3-2.1-2.3h-4.9V32.3H233.9z"/> + </g> + <g> + <path class="st0" 
d="M256.1,62c-1.2,0-2.2,1-2.2,2.3c0,1.3,1,2.3,2.2,2.3h7.3c1.3,0,2.3-1,2.3-2.2V29.9c0-1.2-1-2.2-2.3-2.2h-7.3 c-1.2,0-2.2,1-2.2,2.3c0,1.3,1,2.3,2.2,2.3h4.9V62H256.1z"/> + </g> + <polygon class="st1" points="208.7,19.8 208.7,12.1 171.8,12.1 171.8,83.7 179.7,83.7 179.7,51.5 196.2,51.5 196.2,43.9 179.7,43.9 179.7,19.8 "/> + <polygon class="st0" points="156.6,12.2 152.3,12.2 133.2,51.9 113.9,12.2 109.7,12.2 109.7,83.7 114.6,83.7 114.6,24.3 133.1,61.9 151.6,24.4 151.6,83.7 156.6,83.7 "/> + <polygon class="st0" points="44.6,83.7 48.1,83.7 48.1,12.1 43.1,12.1 43.2,70.5 14.2,12.2 10.1,12.2 10.1,83.7 15.1,83.7 14.9,23.1 "/> + </g> + <g id="XMLID_3_"> + <path class="st1" d="M34.9,125.3c-1.2,0-2.3-0.2-3.2-0.5c-0.9-0.3-1.6-0.8-2.1-1.5c-0.5-0.7-0.9-1.4-1.1-2.3 c-0.2-0.9-0.4-1.9-0.4-3v-8.5c0-2.3,0.5-4.1,1.6-5.3c1.1-1.2,2.8-1.8,5.2-1.8c2.4,0,4.1,0.6,5.2,1.8c1.1,1.2,1.6,3,1.6,5.3v8.5 c0,2.3-0.5,4.1-1.6,5.4C39.1,124.7,37.3,125.3,34.9,125.3z M33.3,122.5c0.4,0.2,1,0.3,1.7,0.3c0.7,0,1.2-0.1,1.7-0.3 c0.4-0.2,0.8-0.5,1-0.9c0.2-0.4,0.4-0.8,0.5-1.3c0.1-0.5,0.1-1,0.1-1.7v-9.9c0-0.7,0-1.2-0.1-1.7c-0.1-0.5-0.2-0.9-0.5-1.2 c-0.2-0.4-0.6-0.7-1-0.8c-0.4-0.2-1-0.3-1.7-0.3c-0.7,0-1.2,0.1-1.7,0.3c-0.4,0.2-0.8,0.5-1,0.8c-0.2,0.4-0.4,0.8-0.5,1.2 c-0.1,0.5-0.1,1-0.1,1.7v9.9c0,0.7,0,1.3,0.1,1.7c0.1,0.5,0.2,0.9,0.5,1.3C32.5,122.1,32.8,122.4,33.3,122.5z"/> + <path class="st1" d="M46.7,125v-22.5h6.2c2.2,0,3.7,0.5,4.7,1.6c1,1.1,1.5,2.6,1.5,4.6c0,1.8-0.5,3.3-1.6,4.3 c-1,1-2.6,1.5-4.6,1.5h-2.7V125H46.7z M50.2,112.3h1.6c1.5,0,2.6-0.2,3.1-0.7c0.6-0.5,0.9-1.4,0.9-2.8c0-0.6,0-1,0-1.4 c0-0.4-0.1-0.7-0.2-1c-0.1-0.3-0.2-0.6-0.4-0.8c-0.2-0.2-0.4-0.3-0.7-0.5c-0.3-0.1-0.7-0.2-1.1-0.3c-0.4,0-0.9-0.1-1.5-0.1h-1.6 V112.3z"/> + <path class="st1" d="M63,125v-22.5h9.6v2.3h-6.2v7.4h5v2.2h-5v8.3h6.2v2.3H63z"/> + <path class="st1" d="M77.1,125v-22.5h2.4l7.1,15v-15h2.9V125h-2.2L80,109.7V125H77.1z"/> + <path class="st1" d="M109.6,125.3c-1,0-1.9-0.1-2.7-0.4c-0.8-0.3-1.4-0.6-1.9-1c-0.5-0.4-0.9-1-1.2-1.6c-0.3-0.6-0.5-1.3-0.7-2 
c-0.1-0.7-0.2-1.5-0.2-2.4v-8c0-1,0.1-1.8,0.2-2.5c0.1-0.7,0.3-1.4,0.7-2.1c0.3-0.6,0.7-1.2,1.2-1.6c0.5-0.4,1.1-0.7,1.9-1 c0.8-0.2,1.7-0.4,2.7-0.4c2.2,0,3.8,0.5,4.8,1.6c1,1.1,1.5,2.7,1.5,4.8v1.8h-3.3V109c0-0.3,0-0.6,0-0.8c0-0.2,0-0.4,0-0.7 c0-0.3,0-0.5-0.1-0.7c0-0.2-0.1-0.4-0.2-0.6c-0.1-0.2-0.1-0.4-0.2-0.5c-0.1-0.1-0.2-0.3-0.4-0.4c-0.1-0.1-0.3-0.2-0.5-0.3 c-0.2-0.1-0.4-0.1-0.7-0.2c-0.3,0-0.6-0.1-0.9-0.1c-0.5,0-0.9,0-1.3,0.1c-0.4,0.1-0.7,0.2-0.9,0.4c-0.2,0.2-0.4,0.4-0.6,0.7 c-0.1,0.2-0.3,0.6-0.3,0.9c-0.1,0.4-0.1,0.8-0.1,1.1s0,0.8,0,1.3v8.9c0,1.7,0.2,2.9,0.7,3.5c0.5,0.7,1.3,1,2.5,1 c0.5,0,0.9,0,1.2-0.1c0.3-0.1,0.6-0.2,0.8-0.4c0.2-0.2,0.4-0.4,0.5-0.7c0.1-0.2,0.2-0.5,0.3-0.9c0.1-0.4,0.1-0.7,0.1-1.1 c0-0.3,0-0.8,0-1.3v-1.7h3.3v1.7c0,0.9-0.1,1.6-0.2,2.3c-0.1,0.7-0.3,1.3-0.6,1.9c-0.3,0.6-0.7,1.1-1.1,1.5 c-0.5,0.4-1.1,0.7-1.8,0.9C111.4,125.2,110.6,125.3,109.6,125.3z"/> + <path class="st1" d="M127.1,125.3c-1.2,0-2.3-0.2-3.2-0.5c-0.9-0.3-1.6-0.8-2.1-1.5c-0.5-0.7-0.9-1.4-1.1-2.3 c-0.2-0.9-0.4-1.9-0.4-3v-8.5c0-2.3,0.5-4.1,1.6-5.3c1.1-1.2,2.8-1.8,5.2-1.8c2.4,0,4.1,0.6,5.2,1.8c1.1,1.2,1.6,3,1.6,5.3v8.5 c0,2.3-0.5,4.1-1.6,5.4C131.2,124.7,129.5,125.3,127.1,125.3z M125.4,122.5c0.4,0.2,1,0.3,1.7,0.3c0.7,0,1.2-0.1,1.7-0.3 c0.4-0.2,0.8-0.5,1-0.9c0.2-0.4,0.4-0.8,0.5-1.3c0.1-0.5,0.1-1,0.1-1.7v-9.9c0-0.7,0-1.2-0.1-1.7c-0.1-0.5-0.2-0.9-0.5-1.2 c-0.2-0.4-0.6-0.7-1-0.8c-0.4-0.2-1-0.3-1.7-0.3c-0.7,0-1.2,0.1-1.7,0.3c-0.4,0.2-0.8,0.5-1,0.8c-0.2,0.4-0.4,0.8-0.5,1.2 c-0.1,0.5-0.1,1-0.1,1.7v9.9c0,0.7,0,1.3,0.1,1.7c0.1,0.5,0.2,0.9,0.5,1.3C124.6,122.1,125,122.4,125.4,122.5z"/> + <path class="st1" d="M138.9,125v-22.5h5.4c2.7,0,4.6,0.6,5.7,1.7c1.1,1.1,1.7,2.8,1.7,5.2v8.3c0,2.5-0.5,4.3-1.6,5.5 c-1.1,1.2-2.9,1.8-5.5,1.8H138.9z M142.3,122.8h2c0.5,0,1,0,1.4-0.1c0.4-0.1,0.7-0.2,1-0.3c0.3-0.1,0.5-0.3,0.7-0.6 c0.2-0.3,0.3-0.6,0.4-0.8c0.1-0.2,0.2-0.6,0.2-1.1c0-0.5,0.1-0.9,0.1-1.2c0-0.3,0-0.8,0-1.5v-7.3c0-0.5,0-1,0-1.4 
c0-0.4-0.1-0.7-0.1-1.1c-0.1-0.4-0.2-0.7-0.3-0.9c-0.1-0.2-0.3-0.5-0.5-0.7c-0.2-0.2-0.4-0.4-0.7-0.5c-0.3-0.1-0.6-0.2-1-0.3 c-0.4-0.1-0.8-0.1-1.3-0.1h-1.9V122.8z"/> + <path class="st1" d="M156.6,125v-22.5h9.6v2.3h-6.2v7.4h5v2.2h-5v8.3h6.2v2.3H156.6z"/> + <path class="st1" d="M178.9,112.4v-2.3h9.4v2.3H178.9z M178.9,117.3v-2.3h9.4v2.3H178.9z"/> + <path class="st1" d="M202.1,125v-22.5h5.7c2.3,0,3.9,0.5,5,1.4c1.1,0.9,1.6,2.3,1.6,4.3c0,2.9-1.2,4.5-3.6,4.8 c1.5,0.3,2.5,1,3.2,1.9c0.7,0.9,1,2.2,1,3.8c0,2-0.5,3.6-1.6,4.7c-1,1.1-2.7,1.7-4.8,1.7H202.1z M205.6,111.9h2 c1.4,0,2.4-0.3,3-0.9c0.6-0.6,0.8-1.5,0.8-2.9c0-0.4,0-0.8-0.1-1.2s-0.2-0.6-0.3-0.8c-0.1-0.2-0.3-0.4-0.5-0.6 c-0.2-0.2-0.5-0.3-0.7-0.4c-0.2-0.1-0.5-0.2-0.9-0.2c-0.4,0-0.8-0.1-1.1-0.1c-0.4,0-0.8,0-1.4,0h-0.8V111.9z M205.6,122.8h2.3 c1.5,0,2.5-0.3,3.1-1c0.6-0.6,0.8-1.7,0.8-3.2c0-1.4-0.3-2.5-1-3.2c-0.7-0.7-1.7-1.1-3.2-1.1h-2.1V122.8z"/> + <path class="st1" d="M219.7,125v-22.5h9.6v2.3h-6.2v7.4h5v2.2h-5v8.3h6.2v2.3H219.7z"/> + <path class="st1" d="M236.4,125v-20.2h-4.6v-2.3h12.4v2.3h-4.4V125H236.4z"/> + <path class="st1" d="M250.2,125v-20.2h-4.6v-2.3h12.4v2.3h-4.4V125H250.2z"/> + <path class="st1" d="M261.5,125v-22.5h9.6v2.3H265v7.4h5v2.2h-5v8.3h6.2v2.3H261.5z"/> + <path class="st1" d="M275.6,125v-22.5h5c2.5,0,4.4,0.5,5.5,1.4c1.2,0.9,1.8,2.5,1.8,4.6c0,2.9-1,4.7-3.1,5.3l3.6,11.3H285 l-3.3-10.6H279V125H275.6z M279,112.2h1.3c1.5,0,2.6-0.3,3.2-0.8c0.6-0.5,1-1.5,1-2.9c0-1.4-0.3-2.3-0.8-2.9 c-0.6-0.6-1.6-0.8-3.1-0.8H279V112.2z"/> + <path class="st1" d="M307.5,125.3c-2.1,0-3.7-0.6-4.8-1.8c-1.1-1.2-1.7-2.8-1.8-4.8l3.1-0.8c0.2,3.2,1.4,4.8,3.5,4.8 c0.9,0,1.6-0.2,2-0.7c0.5-0.4,0.7-1.1,0.7-2c0-0.5-0.1-0.9-0.2-1.3c-0.1-0.4-0.3-0.8-0.6-1.1c-0.3-0.4-0.6-0.7-0.8-0.9 c-0.3-0.2-0.6-0.6-1.1-1l-4.2-3.4c-0.8-0.7-1.5-1.4-1.8-2.2c-0.4-0.8-0.6-1.7-0.6-2.8c0-1.6,0.5-2.9,1.6-3.8 c1-0.9,2.5-1.4,4.3-1.4c2,0,3.5,0.4,4.5,1.4c1,1,1.6,2.4,1.8,4.4l-2.9,0.7c0-0.5-0.1-0.9-0.2-1.3c-0.1-0.4-0.2-0.8-0.3-1.1 
c-0.2-0.4-0.4-0.7-0.6-0.9c-0.2-0.2-0.5-0.4-0.9-0.6c-0.4-0.1-0.8-0.2-1.3-0.2c-1.8,0.1-2.7,0.9-2.7,2.5c0,0.7,0.1,1.2,0.4,1.7 c0.3,0.4,0.7,0.9,1.3,1.4l4.2,3.4c1.1,0.9,2,1.8,2.6,2.9c0.6,1,1,2.2,1,3.5c0,1.6-0.5,3-1.7,3.9 C310.7,124.8,309.3,125.3,307.5,125.3z"/> + <path class="st1" d="M323.9,125.3c-1,0-1.9-0.1-2.7-0.4c-0.8-0.3-1.4-0.6-1.9-1c-0.5-0.4-0.9-1-1.2-1.6c-0.3-0.6-0.5-1.3-0.7-2 c-0.1-0.7-0.2-1.5-0.2-2.4v-8c0-1,0.1-1.8,0.2-2.5c0.1-0.7,0.3-1.4,0.7-2.1c0.3-0.6,0.7-1.2,1.2-1.6c0.5-0.4,1.1-0.7,1.9-1 c0.8-0.2,1.7-0.4,2.7-0.4c2.2,0,3.9,0.5,4.8,1.6c1,1.1,1.5,2.7,1.5,4.8v1.8h-3.3V109c0-0.3,0-0.6,0-0.8c0-0.2,0-0.4,0-0.7 c0-0.3,0-0.5-0.1-0.7c0-0.2-0.1-0.4-0.2-0.6c-0.1-0.2-0.1-0.4-0.2-0.5c-0.1-0.1-0.2-0.3-0.4-0.4c-0.1-0.1-0.3-0.2-0.5-0.3 c-0.2-0.1-0.4-0.1-0.7-0.2c-0.3,0-0.5-0.1-0.9-0.1c-0.5,0-0.9,0-1.3,0.1c-0.4,0.1-0.7,0.2-0.9,0.4c-0.2,0.2-0.4,0.4-0.6,0.7 c-0.1,0.2-0.3,0.6-0.3,0.9c-0.1,0.4-0.1,0.8-0.1,1.1c0,0.4,0,0.8,0,1.3v8.9c0,1.7,0.2,2.9,0.7,3.5c0.5,0.7,1.3,1,2.5,1 c0.5,0,0.9,0,1.2-0.1c0.3-0.1,0.6-0.2,0.8-0.4c0.2-0.2,0.4-0.4,0.5-0.7c0.1-0.2,0.2-0.5,0.3-0.9c0.1-0.4,0.1-0.7,0.1-1.1 c0-0.3,0-0.8,0-1.3v-1.7h3.3v1.7c0,0.9-0.1,1.6-0.2,2.3c-0.1,0.7-0.3,1.3-0.6,1.9c-0.3,0.6-0.7,1.1-1.1,1.5 c-0.5,0.4-1.1,0.7-1.8,0.9C325.7,125.2,324.9,125.3,323.9,125.3z"/> + <path class="st1" d="M335.2,125v-22.5h3.4V125H335.2z"/> + <path class="st1" d="M344.2,125v-22.5h9.6v2.3h-6.2v7.4h5v2.2h-5v8.3h6.2v2.3H344.2z"/> + <path class="st1" d="M358.3,125v-22.5h2.4l7.1,15v-15h2.9V125h-2.2l-7.2-15.4V125H358.3z"/> + <path class="st1" d="M382.3,125.3c-1,0-1.9-0.1-2.7-0.4c-0.8-0.3-1.4-0.6-1.9-1c-0.5-0.4-0.9-1-1.2-1.6c-0.3-0.6-0.5-1.3-0.7-2 c-0.1-0.7-0.2-1.5-0.2-2.4v-8c0-1,0.1-1.8,0.2-2.5c0.1-0.7,0.3-1.4,0.7-2.1c0.3-0.6,0.7-1.2,1.2-1.6c0.5-0.4,1.1-0.7,1.9-1 c0.8-0.2,1.7-0.4,2.7-0.4c2.2,0,3.9,0.5,4.8,1.6c1,1.1,1.5,2.7,1.5,4.8v1.8h-3.3V109c0-0.3,0-0.6,0-0.8c0-0.2,0-0.4,0-0.7 c0-0.3,0-0.5-0.1-0.7c0-0.2-0.1-0.4-0.2-0.6c-0.1-0.2-0.1-0.4-0.2-0.5c-0.1-0.1-0.2-0.3-0.4-0.4c-0.1-0.1-0.3-0.2-0.5-0.3 
c-0.2-0.1-0.4-0.1-0.7-0.2c-0.3,0-0.5-0.1-0.9-0.1c-0.5,0-0.9,0-1.3,0.1c-0.4,0.1-0.7,0.2-0.9,0.4c-0.2,0.2-0.4,0.4-0.6,0.7 c-0.1,0.2-0.3,0.6-0.3,0.9c-0.1,0.4-0.1,0.8-0.1,1.1c0,0.4,0,0.8,0,1.3v8.9c0,1.7,0.2,2.9,0.7,3.5c0.5,0.7,1.3,1,2.5,1 c0.5,0,0.9,0,1.2-0.1c0.3-0.1,0.6-0.2,0.8-0.4c0.2-0.2,0.4-0.4,0.5-0.7c0.1-0.2,0.2-0.5,0.3-0.9c0.1-0.4,0.1-0.7,0.1-1.1 c0-0.3,0-0.8,0-1.3v-1.7h3.3v1.7c0,0.9-0.1,1.6-0.2,2.3c-0.1,0.7-0.3,1.3-0.6,1.9c-0.3,0.6-0.7,1.1-1.1,1.5 c-0.5,0.4-1.1,0.7-1.8,0.9C384.1,125.2,383.2,125.3,382.3,125.3z"/> + <path class="st1" d="M393.4,125v-22.5h9.6v2.3h-6.2v7.4h5v2.2h-5v8.3h6.2v2.3H393.4z"/> + </g> +</g> +</svg> \ No newline at end of file diff --git a/web/pandas/static/img/partners/r_studio.svg b/web/pandas/static/img/partners/r_studio.svg new file mode 100644 index 0000000000000..15a1d2a30ff30 --- /dev/null +++ b/web/pandas/static/img/partners/r_studio.svg @@ -0,0 +1,50 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- Generator: Adobe Illustrator 22.1.0, SVG Export Plug-In . 
SVG Version: 6.00 Build 0) --> +<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 1784.1 625.9" style="enable-background:new 0 0 1784.1 625.9;" xml:space="preserve"> +<style type="text/css"> + .st0{fill:#75AADB;} + .st1{fill:#4D4D4D;} + .st2{fill:#FFFFFF;} + .st3{fill:url(#SVGID_1_);} + .st4{fill:url(#SVGID_2_);} + .st5{fill:url(#SVGID_3_);} + .st6{fill:url(#SVGID_4_);} + .st7{fill:url(#SVGID_5_);} + .st8{fill:url(#SVGID_6_);} + .st9{fill:url(#SVGID_7_);} + .st10{fill:url(#SVGID_8_);} + .st11{fill:url(#SVGID_9_);} + .st12{fill:url(#SVGID_10_);} + .st13{opacity:0.18;fill:url(#SVGID_11_);} + .st14{opacity:0.3;} +</style> +<g id="Gray_Logo"> +</g> +<g id="Black_Letters"> +</g> +<g id="Blue_Gradient_Letters"> + <g> + + <ellipse transform="matrix(0.7071 -0.7071 0.7071 0.7071 -127.9265 317.0317)" class="st0" cx="318.7" cy="312.9" rx="309.8" ry="309.8"/> + <g> + <path class="st1" d="M694.4,404.8c16.1,10.3,39.1,18.1,63.9,18.1c36.7,0,58.1-19.4,58.1-47.4c0-25.5-14.8-40.8-52.3-54.8 c-45.3-16.5-73.3-40.4-73.3-79.1c0-43.3,35.8-75.4,89.8-75.4c28,0,49,6.6,61,13.6l-9.9,29.3c-8.7-5.4-27.2-13.2-52.3-13.2 c-37.9,0-52.3,22.7-52.3,41.6c0,26,16.9,38.7,55.2,53.6c47,18.1,70.5,40.8,70.5,81.6c0,42.8-31.3,80.3-96.8,80.3 c-26.8,0-56-8.2-70.9-18.1L694.4,404.8z"/> + <path class="st1" d="M943.3,201.3v47.8h51.9v27.6h-51.9v107.5c0,24.7,7,38.7,27.2,38.7c9.9,0,15.7-0.8,21-2.5l1.6,27.6 c-7,2.5-18.1,4.9-32.1,4.9c-16.9,0-30.5-5.8-39.1-15.2c-9.9-11.1-14-28.8-14-52.3V276.7h-30.9v-27.6h30.9V212L943.3,201.3z"/> + <path class="st1" d="M1202.8,393.7c0,21,0.4,39.1,1.6,54.8h-32.1l-2.1-32.5h-0.8c-9.1,16.1-30.5,37.1-65.9,37.1 c-31.3,0-68.8-17.7-68.8-87.3V249.1h36.3v110c0,37.9,11.9,63.9,44.5,63.9c24.3,0,41.2-16.9,47.8-33.4c2.1-4.9,3.3-11.5,3.3-18.5 v-122h36.3V393.7z"/> + <path class="st1" d="M1434.8,156v241c0,17.7,0.8,37.9,1.6,51.5h-32.1l-1.6-34.6h-1.2c-10.7,22.2-34.6,39.1-67.2,39.1 
c-48.2,0-85.7-40.8-85.7-101.4c-0.4-66.3,41.2-106.7,89.4-106.7c30.9,0,51.1,14.4,60.2,30.1h0.8V156H1434.8z M1398.9,330.2 c0-4.5-0.4-10.7-1.6-15.2c-5.4-22.7-25.1-41.6-52.3-41.6c-37.5,0-59.7,33-59.7,76.6c0,40.4,20.2,73.8,58.9,73.8 c24.3,0,46.6-16.5,53.1-43.3c1.2-4.9,1.6-9.9,1.6-15.7V330.2z"/> + <path class="st1" d="M1535.7,193c0,12.4-8.7,22.2-23.1,22.2c-13.2,0-21.8-9.9-21.8-22.2c0-12.4,9.1-22.7,22.7-22.7 C1526.6,170.4,1535.7,180.3,1535.7,193z M1495.3,448.5V249.1h36.3v199.4H1495.3z"/> + <path class="st1" d="M1772.2,347.1c0,73.7-51.5,105.9-99.3,105.9c-53.6,0-95.6-39.6-95.6-102.6c0-66.3,44.1-105.5,98.9-105.5 C1733.5,245,1772.2,286.6,1772.2,347.1z M1614.4,349.2c0,43.7,24.7,76.6,60.2,76.6c34.6,0,60.6-32.5,60.6-77.5 c0-33.8-16.9-76.2-59.7-76.2C1632.9,272.1,1614.4,311.7,1614.4,349.2z"/> + </g> + <g> + <path class="st2" d="M424.7,411.8h33.6v26.1h-51.3L322,310.5h-45.3v101.3h44.3v26.1H209.5v-26.1h38.3V187.3l-38.3-4.7v-24.7 c14.5,3.3,27.1,5.6,42.9,5.6c23.8,0,48.1-5.6,71.9-5.6c46.2,0,89.1,21,89.1,72.3c0,39.7-23.8,64.9-60.7,75.6L424.7,411.8z M276.7,285.3l24.3,0.5c59.3,0.9,82.1-21.9,82.1-52.3c0-35.5-25.7-49.5-58.3-49.5c-15.4,0-31.3,1.4-48.1,3.3V285.3z"/> + </g> + <g> + <path class="st1" d="M1751.8,170.4c-12.9,0-23.4,10.5-23.4,23.4c0,12.9,10.5,23.4,23.4,23.4c12.9,0,23.4-10.5,23.4-23.4 C1775.2,180.9,1764.7,170.4,1751.8,170.4z M1771.4,193.8c0,10.8-8.8,19.5-19.5,19.5c-10.8,0-19.5-8.8-19.5-19.5 c0-10.8,8.8-19.5,19.5-19.5C1762.6,174.2,1771.4,183,1771.4,193.8z"/> + <path class="st1" d="M1760.1,203.3l-5.8-8.5c3.3-1.2,5-3.6,5-7c0-5.1-4.3-6.9-8.4-6.9c-1.1,0-2.2,0.1-3.2,0.3 c-1,0.1-2.1,0.2-3.1,0.2c-1.4,0-2.5-0.2-3.7-0.5l-0.6-0.1v3.3l3.4,0.4v18.8h-3.4v3.4h10.9v-3.4h-3.9v-7.9h3.2l7.3,11l0.2,0.2h5.3 v-3.4H1760.1z M1755.6,188.1c0,1.2-0.5,2.2-1.4,2.9c-1.1,0.8-2.8,1.2-5,1.2l-1.9,0v-7.7c1.4-0.1,2.6-0.2,3.7-0.2 C1753.1,184.3,1755.6,185,1755.6,188.1z"/> + </g> + </g> +</g> +<g id="White_Letters"> +</g> +<g id="R_Ball"> +</g> +</svg> \ No newline at end of file diff --git 
a/web/pandas/static/img/partners/tidelift.svg b/web/pandas/static/img/partners/tidelift.svg new file mode 100644 index 0000000000000..af12d68417235 --- /dev/null +++ b/web/pandas/static/img/partners/tidelift.svg @@ -0,0 +1,33 @@ +<?xml version="1.0" encoding="utf-8"?> +<!-- Generator: Adobe Illustrator 21.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) --> +<svg version="1.1" id="Artwork" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" + viewBox="0 0 190.1 33" style="enable-background:new 0 0 190.1 33;" xml:space="preserve"> +<style type="text/css"> + .st0{fill:#4B5168;} + .st1{fill:#F6914D;} +</style> +<g> + <path class="st0" d="M33.4,27.7V5.3c0-2.3,0-2.3,2.4-2.3c2.4,0,2.4,0,2.4,2.3v22.4c0,2.3,0,2.3-2.4,2.3 + C33.4,29.9,33.4,29.9,33.4,27.7z"/> + <path class="st0" d="M45,26.4V6.6c0-3.6,0-3.6,3.6-3.6h5.8c7.8,0,12.5,3.9,13,10.2c0.2,2.2,0.2,3.4,0,5.5 + c-0.5,6.3-5.3,11.2-13,11.2h-5.8C45,29.9,45,29.9,45,26.4z M54.3,25.4c5.3,0,8-3,8.3-7.1c0.1-1.8,0.1-2.8,0-4.6 + c-0.3-4.2-3-6.1-8.3-6.1h-4.5v17.8H54.3z"/> + <path class="st0" d="M73.8,26.4V6.6c0-3.6,0-3.6,3.6-3.6h13.5c2.3,0,2.3,0,2.3,2.2c0,2.2,0,2.2-2.3,2.2H78.6v6.9h11 + c2.2,0,2.2,0,2.2,2.1c0,2.1,0,2.1-2.2,2.1h-11v6.9h12.3c2.3,0,2.3,0,2.3,2.2c0,2.3,0,2.3-2.3,2.3H77.4 + C73.8,29.9,73.8,29.9,73.8,26.4z"/> + <path class="st0" d="M100,26.4v-21c0-2.3,0-2.3,2.4-2.3c2.4,0,2.4,0,2.4,2.3v20.2h11.9c2.4,0,2.4,0,2.4,2.2c0,2.2,0,2.2-2.4,2.2 + h-13.1C100,29.9,100,29.9,100,26.4z"/> + <path class="st0" d="M125.8,27.7V5.3c0-2.3,0-2.3,2.4-2.3c2.4,0,2.4,0,2.4,2.3v22.4c0,2.3,0,2.3-2.4,2.3 + C125.8,29.9,125.8,29.9,125.8,27.7z"/> + <path class="st0" d="M137.4,27.7V6.6c0-3.6,0-3.6,3.6-3.6h13.5c2.3,0,2.3,0,2.3,2.2c0,2.2,0,2.2-2.3,2.2h-12.2v7.2h11.3 + c2.3,0,2.3,0,2.3,2.2c0,2.2,0,2.2-2.3,2.2h-11.3v8.6c0,2.3,0,2.3-2.4,2.3S137.4,29.9,137.4,27.7z"/> + <path class="st0" d="M24.2,3.1H5.5c-2.4,0-2.4,0-2.4,2.2c0,2.2,0,2.2,2.4,2.2h7v4.7v3.2l4.8-3.7v-1.1V7.5h7c2.4,0,2.4,0,2.4-2.2 + 
C26.6,3.1,26.6,3.1,24.2,3.1z"/> + <path class="st1" d="M12.5,20v7.6c0,2.3,0,2.3,2.4,2.3c2.4,0,2.4,0,2.4-2.3V16.3L12.5,20z"/> + <g> + <path class="st0" d="M165.9,3.1h18.7c2.4,0,2.4,0,2.4,2.2c0,2.2,0,2.2-2.4,2.2h-7v4.7v3.2l-4.8-3.7v-1.1V7.5h-7 + c-2.4,0-2.4,0-2.4-2.2C163.5,3.1,163.5,3.1,165.9,3.1z"/> + <path class="st1" d="M177.6,20v7.6c0,2.3,0,2.3-2.4,2.3c-2.4,0-2.4,0-2.4-2.3V16.3L177.6,20z"/> + </g> +</g> +</svg> diff --git a/web/pandas/static/img/partners/two_sigma.svg b/web/pandas/static/img/partners/two_sigma.svg new file mode 100644 index 0000000000000..d38df12766ed6 --- /dev/null +++ b/web/pandas/static/img/partners/two_sigma.svg @@ -0,0 +1 @@ +<svg width="230" height="42" viewBox="0 0 230 42" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"><title>Logo</title><defs><path id="a" d="M19.436 21.668V1.025H0v20.643h19.435z"></path></defs><g fill="none" fill-rule="evenodd"><path fill="#2D2D2D" d="M59.06 13.464h-7.137v-3.155h17.811v3.155H62.6V30.95h-3.54zm14.01-3.155h3.745l4.747 15.66h.06l4.483-15.66h3.301l4.454 15.66h.059l4.777-15.66h3.716L95.895 30.95H92.09l-4.335-15.127h-.059L83.361 30.95h-3.804zm41.214-.355c5.986 0 10.527 4.158 10.527 10.556 0 6.55-4.541 10.794-10.527 10.794-5.985 0-10.558-4.245-10.558-10.794 0-6.398 4.573-10.556 10.558-10.556m0 18.285c3.892 0 6.93-2.89 6.93-7.729 0-4.658-3.007-7.518-6.93-7.518-3.922 0-6.93 2.86-6.93 7.518 0 4.839 3.038 7.73 6.93 7.73m40.846-17.931h3.539V30.95h-3.54V19.41zm18.744-.355c2.832 0 5.222.885 7.313 2.33 0 0-2.026 2.374-2.128 2.311-1.56-1-3.21-1.574-5.096-1.574-4.247 0-7.048 3.068-7.048 7.433 0 4.746 2.624 7.785 7.048 7.785 1.534 0 3.067-.385 4.13-1.003v-4.897h-5.19v-2.623h8.462v9.347c-2.007 1.416-4.63 2.24-7.49 2.24-6.46 0-10.587-4.363-10.587-10.85 0-6.075 4.187-10.499 10.586-10.499m12.506.355h3.57l6.812 9.701 6.811-9.701h3.541V30.95h-3.421V15.558l-6.962 9.73-6.958-9.73V30.95h-3.392z"></path><g transform="translate(210.418 9.283)"><mask id="b" fill="#fff"><use 
xlink:href="#a"></use></mask><path d="M7.639 1.025h4.158l7.64 20.643H15.63l-1.561-4.454H5.368l-1.533 4.454H0L7.639 1.025zM6.34 14.354h6.725L9.734 4.74h-.06L6.34 14.354z" fill="#2D2D2D" mask="url(#b)"></path></g><path d="M136.826 26.498c1.861 1.007 3.618 1.68 5.887 1.68 2.715 0 4.069-1.18 4.069-2.83 0-4.66-11.616-1.594-11.616-9.466 0-3.303 2.74-5.928 7.37-5.928 2.714 0 5.443.653 7.579 1.902l-2.314 2.361c-1.68-.72-3.11-1.137-5.146-1.137-2.389 0-3.806 1.21-3.806 2.744 0 4.63 11.62 1.473 11.62 9.494 0 3.393-2.567 5.985-7.756 5.985-3.035 0-6.33-1.076-8.273-2.419l2.386-2.386z" fill="#2D2D2D"></path><path fill="#009AA6" d="M20.625 0L0 20.63l20.625 20.628 20.63-20.628z"></path><path d="M9.748 26.478c-.16-6.605 7.789-5.746 7.789-9.13 0-1.1-.863-2.041-2.784-2.041-1.401 0-2.743.701-3.724 1.602l-1.46-1.463c1.259-1.18 3.223-2.14 5.284-2.14 3.304 0 4.986 1.842 4.986 4.003 0 4.986-7.728 4.104-7.728 8.27h7.607v1.98h-9.95l-.02-1.081zm15.937-.5c-1.521 0-2.423-.98-2.423-2.862 0-2.404 1.602-4.566 3.525-4.566 1.5 0 2.402.981 2.402 2.883 0 2.401-1.582 4.545-3.504 4.545zm9.713-9.25h-8.444v.003c-3.437.005-6.033 2.745-6.033 6.403 0 2.905 1.881 4.666 4.544 4.666 3.464 0 6.067-2.743 6.067-6.386 0-1.182-.313-2.173-.856-2.935h2.947l1.775-1.75z" fill="#FFF"></path></g></svg> diff --git a/web/pandas/static/img/partners/ursa_labs.svg b/web/pandas/static/img/partners/ursa_labs.svg new file mode 100644 index 0000000000000..cacc80e337d25 --- /dev/null +++ b/web/pandas/static/img/partners/ursa_labs.svg @@ -0,0 +1,106 @@ +<?xml version="1.0" encoding="utf-8"?> +<!-- Generator: Adobe Illustrator 23.0.3, SVG Export Plug-In . 
SVG Version: 6.00 Build 0) --> +<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" + viewBox="0 0 359 270" style="enable-background:new 0 0 359 270;" xml:space="preserve"> +<style type="text/css"> + .st0{fill-rule:evenodd;clip-rule:evenodd;fill:#404040;} + .st1{filter:url(#Adobe_OpacityMaskFilter);} + .st2{fill-rule:evenodd;clip-rule:evenodd;fill:#FFFFFF;} + .st3{mask:url(#mask-2_1_);} +</style> +<title>HOME 1 Copy 8</title> +<desc>Created with Sketch.</desc> +<g id="HOME-1-Copy-8"> + <g id="Group" transform="translate(20.000000, 20.000000)"> + <path id="URSA-LABS-Copy" class="st0" d="M0,158.4h9.1V214c0,0.3,0,0.7,0.1,1.1c0,0.3,0,0.9,0.1,1.6s0.2,1.5,0.6,2.3 + c0.3,0.8,0.9,1.5,1.6,2.1c0.7,0.6,1.8,0.9,3.3,0.9c0.3,0,0.9,0,1.6-0.1c0.7-0.1,1.4-0.4,2.1-0.9c1-0.9,1.6-2,1.8-3.3 + s0.3-3.2,0.4-5.5v-53.8h9.2v54.4c0,0.6,0,1.3-0.1,2.1c-0.1,0.8-0.2,1.7-0.3,2.6s-0.3,1.8-0.5,2.6c-0.7,2.3-1.7,4.1-3,5.4 + c-1.3,1.3-2.7,2.3-4.2,2.9c-1.5,0.7-2.9,1.1-4.2,1.2c-1.3,0.1-2.3,0.2-3,0.2c-0.6,0-1.5-0.1-2.7-0.2c-1.2-0.1-2.5-0.5-3.8-1 + s-2.6-1.4-3.8-2.5c-1.2-1.1-2.2-2.7-3-4.6c-0.4-1-0.7-2.1-0.9-3.3c-0.2-1.2-0.3-2.9-0.4-5V158.4z M44,158.4h17 + c0.6,0,1.2,0,1.7,0.1c0.6,0.1,1.3,0.2,2.2,0.3c0.9,0.1,1.7,0.4,2.6,0.8c0.8,0.4,1.6,1.1,2.3,2c0.7,0.9,1.2,2.1,1.6,3.7 + c0.4,1.8,0.6,5.1,0.6,10.1c0,1.3,0,2.7-0.1,4.1c0,1.4-0.1,2.8-0.2,4.2c-0.1,0.9-0.3,1.9-0.4,2.9s-0.4,1.9-0.7,2.7 + c-0.4,0.9-0.9,1.6-1.6,2.1s-1.3,0.8-2,1c-0.7,0.2-1.3,0.3-1.9,0.3H64v0.5c1.3,0.1,2.4,0.3,3.3,0.6c0.9,0.3,1.8,1,2.5,2.1 + c0.8,1.3,1.3,2.7,1.5,4.3c0.2,1.6,0.3,3.9,0.3,6.8v7.7c0,2,0,3.6,0.1,4.9c0.1,1.3,0.2,2.4,0.3,3.3c0.1,0.9,0.3,1.8,0.5,2.7 + c0.2,0.9,0.6,1.8,1,2.9h-9.7c-0.3-1.7-0.6-3-0.8-4.1s-0.3-2.2-0.4-3.2c-0.1-1-0.2-2.1-0.2-3.2c0-1.1-0.1-2.5-0.1-4.2v-5 + c-0.1-1.2-0.1-2.4-0.2-3.6c0-1.2-0.1-2.4-0.3-3.6c-0.1-0.9-0.3-1.7-0.5-2.5c-0.2-0.8-0.6-1.5-1.2-2c-0.5-0.3-1-0.5-1.5-0.6 + s-1-0.2-1.6-0.2h-3.8v32.4H44V158.4z 
M53.4,166.9v21.7h4.4c1.2,0,2.2-0.2,2.9-0.6c0.7-0.4,1.2-1.2,1.6-2.5 + c0.2-0.9,0.3-2.3,0.4-4.2s0.1-4.1,0.1-6.6c0-0.7,0-1.5-0.1-2.2c0-0.8-0.1-1.5-0.2-2.2c-0.1-1.4-0.4-2.3-1-2.8 + c-0.3-0.3-0.8-0.5-1.3-0.5c-0.5,0-1.2,0-2.2,0H53.4z M110.6,169.1v12.4h-8.5v-12.4c0-0.2,0-0.6-0.1-1.1c0-0.5-0.2-1.1-0.4-1.6 + c-0.2-0.5-0.6-1-1.1-1.4s-1.3-0.6-2.3-0.6c-1.1,0-2,0.2-2.6,0.6c-0.6,0.4-1.1,1-1.4,1.7c-0.3,0.7-0.5,1.5-0.6,2.3 + c-0.1,0.9-0.1,1.7-0.1,2.5c0,1.5,0.1,2.8,0.3,4c0.2,1.2,0.5,2.3,0.9,3.4s0.9,2.2,1.5,3.2s1.3,2.2,2.1,3.4c0.7,1.1,1.3,2.1,2,3.1 + c0.7,1,1.4,2,2.1,3.1c1,1.4,2,2.9,3.1,4.6c1.2,1.9,2.2,3.7,2.9,5.3c0.7,1.6,1.3,3.1,1.7,4.5c0.4,1.4,0.7,2.7,0.8,3.9 + c0.1,1.2,0.2,2.3,0.2,3.3c0,0.4,0,1.3-0.1,2.6c-0.1,1.3-0.4,2.8-0.9,4.4c-0.5,1.6-1.3,3.3-2.3,4.9c-1,1.6-2.6,2.9-4.6,3.7 + c-0.6,0.3-1.4,0.5-2.4,0.7c-1,0.2-2.3,0.3-3.8,0.3c-2.9,0-5.1-0.5-6.8-1.4s-2.8-1.9-3.6-2.8c-1.5-1.7-2.3-3.4-2.6-5.3 + s-0.4-3.8-0.5-5.9V203h8.6v12.8c0,1.7,0.2,3,0.5,3.8c0.3,0.8,0.8,1.5,1.6,2c0.2,0.1,0.5,0.3,1,0.5c0.5,0.2,1.1,0.3,1.8,0.3 + c1.1,0,2-0.3,2.7-0.8c0.6-0.6,1.1-1.3,1.4-2.1c0.3-0.8,0.4-1.7,0.5-2.7c0-1,0.1-1.8,0.1-2.6c0-2.5-0.3-4.6-0.8-6.4 + c-0.5-1.7-1.4-3.7-2.7-5.9c-1.3-2.3-2.8-4.5-4.3-6.6s-2.9-4.3-4.3-6.5c-0.4-0.6-0.9-1.4-1.5-2.4c-0.6-1-1.2-2.2-1.8-3.6 + c-0.6-1.4-1.1-3-1.5-4.7s-0.7-3.7-0.7-5.7c0-3.9,0.7-6.9,2.1-9s2.8-3.7,4.3-4.5c0.7-0.5,1.8-0.9,3.1-1.3c1.3-0.4,3-0.6,5-0.6 + c0.5,0,1.2,0,2.3,0.1c1,0.1,2.1,0.3,3.3,0.7c1.1,0.4,2.2,1.1,3.3,2c1.1,0.9,1.9,2.3,2.4,4c0.2,0.7,0.4,1.4,0.5,2.1 + C110.5,166.6,110.5,167.7,110.6,169.1z M140.1,158.4l10.9,70.3h-9.1l-1.8-12.9h-10.6l-1.6,12.9h-9.1l10-70.3H140.1z M133.5,183 + l-3,24.2h8.4l-3.5-24.4c-0.1-0.6-0.2-1.2-0.3-1.8c0-0.6-0.1-1.2-0.2-1.8c-0.1-1.3-0.1-2.6-0.1-3.8c0-1.3,0-2.5-0.1-3.8H134 + c-0.1,1.9-0.1,3.8-0.2,5.7C133.7,179.2,133.6,181.1,133.5,183z M190.2,158.4V220h15.4v8.7h-24.7v-70.3H190.2z M232,158.4 + l10.9,70.3h-9.1l-1.8-12.9h-10.6l-1.6,12.9h-9.1l10-70.3H232z M225.4,183l-3,24.2h8.4l-3.5-24.4c-0.1-0.6-0.2-1.2-0.3-1.8 + 
c0-0.6-0.1-1.2-0.2-1.8c-0.1-1.3-0.1-2.6-0.1-3.8c0-1.3,0-2.5-0.1-3.8h-0.8c-0.1,1.9-0.1,3.8-0.2,5.7 + C225.6,179.2,225.5,181.1,225.4,183z M251.9,158.4h16.5c1.5,0,2.9,0.1,4.4,0.2s2.8,0.8,3.9,1.8c1.3,1.2,2,2.7,2.2,4.5 + c0.2,1.8,0.3,4.3,0.4,7.4c0,0.6,0,1.2,0.1,1.8c0,0.6,0.1,1.2,0.1,1.8c0,1.1,0,2.2-0.1,3.3c0,1.1-0.1,2.2-0.2,3.3 + c0,0.2,0,0.9-0.1,2.1c-0.1,1.2-0.3,2.3-0.8,3.3c-0.4,0.7-1,1.3-1.7,1.8c-0.7,0.5-1.4,0.8-2.2,1c-0.4,0.1-0.8,0.2-1.3,0.2 + c-0.5,0-0.8,0-0.9,0.1v0.5c1.3,0.1,2.4,0.4,3.5,0.7c1,0.4,1.9,1.1,2.6,2.2c0.5,1,0.8,2.2,0.9,3.7c0.1,1.5,0.1,3.4,0.1,5.9 + c0.1,0.9,0.1,1.9,0.1,2.8v7c0,1.4-0.1,2.8-0.2,4.3c0,0.2,0,0.6-0.1,1.2c0,0.6-0.2,1.3-0.4,2.1c-0.2,0.8-0.5,1.6-0.9,2.5 + s-1,1.6-1.7,2.3c-1.4,1.1-3,1.8-4.9,1.9s-3.6,0.2-5.3,0.2h-14.2V158.4z M260.9,166.8v21.1h3.6c1.5-0.1,2.7-0.2,3.7-0.5 + c1-0.3,1.6-1.3,1.8-3c0.2-1.4,0.3-3.8,0.3-7.1c0-2.2-0.1-4.4-0.3-6.6c-0.1-1.7-0.4-2.8-1-3.3c-0.3-0.3-0.8-0.5-1.3-0.5 + c-0.5,0-1.2,0-2.1,0H260.9z M260.9,195.5V220h4.8c0.5,0,1,0,1.5,0c0.5,0,0.9-0.1,1.3-0.2c0.4-0.1,0.7-0.3,1-0.6 + c0.3-0.3,0.5-0.8,0.6-1.4c0-0.3,0-0.7,0.1-1.4c0-0.7,0.1-1.5,0.1-2.4c0-0.9,0.1-1.9,0.1-2.9c0-1.1,0.1-2.1,0.1-3.1 + c0-1.2,0-2.4-0.1-3.5c0-1.2-0.1-2.3-0.2-3.5c-0.1-0.7-0.2-1.4-0.3-2.3c-0.1-0.9-0.4-1.6-1-2.1c-0.4-0.3-0.9-0.5-1.4-0.6 + s-1-0.1-1.5-0.1H260.9z M318.4,169.1v12.4h-8.5v-12.4c0-0.2,0-0.6-0.1-1.1c0-0.5-0.2-1.1-0.4-1.6c-0.2-0.5-0.6-1-1.1-1.4 + c-0.5-0.4-1.3-0.6-2.3-0.6c-1.1,0-2,0.2-2.6,0.6s-1.1,1-1.4,1.7s-0.5,1.5-0.6,2.3c-0.1,0.9-0.1,1.7-0.1,2.5c0,1.5,0.1,2.8,0.3,4 + s0.5,2.3,0.9,3.4s0.9,2.2,1.5,3.2c0.6,1.1,1.3,2.2,2.1,3.4c0.7,1.1,1.3,2.1,2,3.1c0.7,1,1.4,2,2.1,3.1c1,1.4,2,2.9,3.1,4.6 + c1.2,1.9,2.2,3.7,2.9,5.3c0.7,1.6,1.3,3.1,1.7,4.5c0.4,1.4,0.7,2.7,0.8,3.9c0.1,1.2,0.2,2.3,0.2,3.3c0,0.4,0,1.3-0.1,2.6 + c-0.1,1.3-0.4,2.8-0.9,4.4c-0.5,1.6-1.3,3.3-2.3,4.9c-1,1.6-2.6,2.9-4.6,3.7c-0.6,0.3-1.4,0.5-2.4,0.7c-1,0.2-2.3,0.3-3.8,0.3 + c-2.9,0-5.1-0.5-6.8-1.4c-1.6-0.9-2.8-1.9-3.6-2.8c-1.5-1.7-2.3-3.4-2.6-5.3c-0.3-1.9-0.4-3.8-0.5-5.9V203h8.6v12.8 + 
c0,1.7,0.2,3,0.5,3.8c0.3,0.8,0.8,1.5,1.6,2c0.2,0.1,0.5,0.3,1,0.5c0.5,0.2,1.1,0.3,1.8,0.3c1.1,0,2-0.3,2.7-0.8 + c0.6-0.6,1.1-1.3,1.4-2.1c0.3-0.8,0.4-1.7,0.5-2.7c0-1,0.1-1.8,0.1-2.6c0-2.5-0.3-4.6-0.8-6.4c-0.5-1.7-1.4-3.7-2.7-5.9 + c-1.3-2.3-2.8-4.5-4.3-6.6c-1.5-2.1-2.9-4.3-4.3-6.5c-0.4-0.6-0.9-1.4-1.5-2.4c-0.6-1-1.2-2.2-1.8-3.6c-0.6-1.4-1.1-3-1.5-4.7 + c-0.4-1.8-0.7-3.7-0.7-5.7c0-3.9,0.7-6.9,2.1-9s2.8-3.7,4.3-4.5c0.7-0.5,1.8-0.9,3.1-1.3c1.3-0.4,3-0.6,5-0.6c0.5,0,1.2,0,2.3,0.1 + c1,0.1,2.1,0.3,3.3,0.7s2.2,1.1,3.3,2s1.9,2.3,2.4,4c0.2,0.7,0.4,1.4,0.5,2.1C318.2,166.6,318.3,167.7,318.4,169.1z"/> + <g id="Group-3-Copy" transform="translate(47.000000, 0.000000)"> + <g id="Clip-2"> + </g> + <defs> + <filter id="Adobe_OpacityMaskFilter" filterUnits="userSpaceOnUse" x="0" y="0" width="225.8" height="123.9"> + <feColorMatrix type="matrix" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0"/> + </filter> + </defs> + <mask maskUnits="userSpaceOnUse" x="0" y="0" width="225.8" height="123.9" id="mask-2_1_"> + <g class="st1"> + <polygon id="path-1_1_" class="st2" points="0,0 225.9,0 225.9,123.9 0,123.9 "/> + </g> + </mask> + <g id="Page-1" class="st3"> + <g id="Mask"> + <path class="st0" d="M177.2,54.3c6.1,21.2,19.4,48.5,24,54.7c5.3-1.2,9.1,1.2,12.4,5.1c-1.2,0.9-2.7,1.5-3.4,2.6 + c-2.7,4.4-6.9,3-10.7,3.2c-2.8,0.2-5.6,0.3-8.4,0.3c-0.9,0-1.8-0.3-2.7-0.5c-1-0.3-1.9-1-2.8-1c-2.5,0.1-4.7,0-7.1-1.1 + c-1-0.5-2.6,0.9-3.6-0.8c-1.1-1.8-2.2-3.6-3.4-5.5c-1.2,0.2-2.2,0.4-3.4,0.6c-2.4-3-3.4-14.8-6.1-17.7 + c-0.6-0.7-2.1-2.2-3.8-2.7c-0.3-0.9-5.4-7.2-5.9-8.7c-0.2-0.5-0.3-1.2-0.7-1.4c-3.1-2-4.2-4.9-4-8.5c0-0.4-0.2-0.7-0.4-1.7 + c-1.2,2.7-2.2,4.8-3.2,7.1c-0.6,1.4-1,2.9-1.8,4.3c-0.5,0.9-1.3,1.6-2,2.3c-2.4,2.2-1.8,0.9-3.2,3.6c-1.1,2-2,4-3,6.1 + c-0.5,1.1-0.9,2.2-1.1,3.3c-0.7,4.1-3.2,7.6-1.5,11.2c3,0.6,6.3,0.5,8.6,2c2.2,1.5,3.5,4.5,5,6.7c-3.1,0.5-5.9,1.2-8.7,1.4 + c-3.8,0.3-7.6,0.2-11.3,0.2c-5,0-10.1-0.1-15.1-0.1c-2.6,0-3.9-1.5-5.4-3.7c-2.1-3.1-1.1-6-0.8-9.1c0.1-0.8,0-3.3-0.1-4.2 + 
c-0.1-0.9-0.1-1.9,0-2.9c0.2-1.3,0.8-2.6,0.9-3.9c0.1-1.5-0.4-3-0.4-4.5c0-1.5,0.1-3.1,0.5-4.6c0.7-2.7-0.1,0,0.7-2.7 + c0.1-0.2,0-0.7,0-0.8c-0.9-3.6,1.8-6,2.8-8.8c0-0.1,0-0.1-0.1-0.5c-1.8,1.8-4.1,0.8-6.1,1.2c-2.9,0.6-5.7,2.1-8,3 + c-1.4-0.1-2.5-0.4-3.5-0.2c-2,0.5-3.9,1.1-6.2,0.9c-2.5-0.2-5.1,0.6-7.7,0.8c-2.2,0.2-4.8,0.9-6.5,0c-1.5-0.7-2.8-0.9-4.4-1 + c-1.6-0.1-2.4,0.7-2.6,2.1c-1.1,6.3-2.3,12.7-3.1,19.1c-0.4,3.3-0.2,6.6-0.2,9.9c0,1.5,0.6,2.5,1.9,3.5 + c1.5,1.1,2.6,2.7,3.6,4.3c0.8,1.3,0.6,2.6-1.5,2.7c-7.3,0.2-14.6,0.5-21.9,0.4c-2.1,0-4.2-1.5-6.2-2.5 + c-0.3-0.2-0.4-1.1-0.4-1.7c0-4.4,0-13.5,0-18.4c-1,0.6-1.3,0.8-1.6,1c-2.5,2.3-4.9,4.1-7.3,6.4c-1.9,1.8-1.6,3.3,0.2,5.4 + c2.4,2.7,4.4,5.7,4.4,9.5c0,2.5-2.2,3.2-3.8,3.3c-5.7,0.4-11.5,0.4-17.2,0.4c-2.8,0-3.8-1.5-4.4-4.2 + c-1.2-5.4-2.2-10.8-4.3-16.1c-1.6-4.1-2-8.9,1.5-13c5.1-5.9,9.5-12.3,12.8-19.5c1-2.2,1.4-3.8,0.4-6.1c-4.9-1-7.1-3.7-8.2-8.7 + c-1-4.6-0.2-8.9,1-13.2c2.3-7.8,4.1-11,8.4-18c5.6-9,13.4-15.5,22.8-20.2c11.3-5.6,23.3-5.5,35.3-4.2 + c16.2,1.6,32.4,3.6,48.6,5.3c1.3,0.1,2.9-0.2,4.1-0.8c7.7-3.9,15.5-4.2,23.6-1.4c5.6,1.9,11.4,3.6,17.1,5.2 + c2,0.6,4.1,0.8,6.2,1.1c5.7,0.9,11.5,1.8,17.3,2.4c2.9,0.3,5.9,0.1,8.8,0.3c0.7,0,1.5,0.3,2.1,0.7c2.6,1.8,5.1,3.7,7.5,5.6 + c1.6,1.2,3.2,2.3,4.5,3.8c0.6,0.7,0.7,1.9,0.9,2.9c0.3,1.1,0.3,2.6,0.9,3.4c2.6,3.1,5.3,6,8.1,8.9c0.9,1,1.1,1.7,0.3,2.9 + c-1.2,1.6-1.8,3.7-3.3,4.8c-3.1,2.2-6.3,4.3-10.7,3.2c-2.5-0.6-5.5,0.5-8.2,0.8c-2.1,0.3-4.3,0.2-6.2,0.9 + c-4.1,1.6-8.5,1.1-12.5,2.3c-1.5,0.4-2.8,1.2-4.3,1.6C179.2,54.8,178.3,54.5,177.2,54.3"/> + </g> + </g> + </g> + </g> +</g> +</svg> diff --git a/web/pandas/static/img/pydata_book.gif b/web/pandas/static/img/pydata_book.gif new file mode 100644 index 0000000000000..db05c209704a2 Binary files /dev/null and b/web/pandas/static/img/pydata_book.gif differ diff --git a/web/pandas/try.md b/web/pandas/try.md new file mode 100644 index 0000000000000..20e119759df6f --- /dev/null +++ b/web/pandas/try.md @@ -0,0 +1,21 @@ +# Try pandas online + +<section> + <pre 
data-executable> +import pandas +fibonacci = pandas.Series([1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]) +fibonacci.sum() + </pre> + <script src="https://combinatronics.com/ines/juniper/v0.1.0/dist/juniper.min.js"></script> + <script>new Juniper({ repo: 'datapythonista/pandas-web' })</script> +</section> + +## Interactive tutorials + +You can also try _pandas_ on [Binder](https://mybinder.org/) for one of the next topics: + +- Exploratory analysis of US presidents +- Preprocessing the Titanic dataset to train a machine learning model +- Forecasting the stock market + +_(links will be added soon)_ diff --git a/web/pandas_web.py b/web/pandas_web.py new file mode 100644 index 0000000000000..d515d8a0e1cd7 --- /dev/null +++ b/web/pandas_web.py @@ -0,0 +1,286 @@ +#!/usr/bin/env python +""" +Simple static site generator for the pandas web. + +pandas_web.py takes a directory as parameter, and copies all the files into the +target directory after converting markdown files into html and rendering both +markdown and html files with a context. The context is obtained by parsing +the file ``config.yml`` in the root of the source directory. + +The file should contain: +``` +main: + template_path: <path_to_the_jinja2_templates_directory> + base_template: <template_file_all_other_files_will_extend> + ignore: + - <list_of_files_in_the_source_that_will_not_be_copied> + github_repo_url: <organization/repo-name> + context_preprocessors: + - <list_of_functions_that_will_enrich_the_context_parsed_in_this_file> + markdown_extensions: + - <list_of_markdown_extensions_that_will_be_loaded> +``` + +The rest of the items in the file will be added directly to the context. +""" +import argparse +import datetime +import importlib +import operator +import os +import shutil +import sys +import time +import typing + +import feedparser +import markdown +import jinja2 +import requests +import yaml + + +class Preprocessors: + """ + Built-in context preprocessors. 
+ + Context preprocessors are functions that receive the context used to + render the templates, and enriches it with additional information. + + The original context is obtained by parsing ``config.yml``, and + anything else needed just be added with context preprocessors. + """ + + @staticmethod + def navbar_add_info(context): + """ + Items in the main navigation bar can be direct links, or dropdowns with + subitems. This context preprocessor adds a boolean field + ``has_subitems`` that tells which one of them every element is. It + also adds a ``slug`` field to be used as a CSS id. + """ + for i, item in enumerate(context["navbar"]): + context["navbar"][i] = dict( + item, + has_subitems=isinstance(item["target"], list), + slug=(item["name"].replace(" ", "-").lower()), + ) + return context + + @staticmethod + def blog_add_posts(context): + """ + Given the blog feed defined in the configuration yaml, this context + preprocessor fetches the posts in the feeds, and returns the relevant + information for them (sorted from newest to oldest). + """ + posts = [] + for feed_url in context["blog"]["feed"]: + feed_data = feedparser.parse(feed_url) + for entry in feed_data.entries: + published = datetime.datetime.fromtimestamp( + time.mktime(entry.published_parsed) + ) + posts.append( + { + "title": entry.title, + "author": entry.author, + "published": published, + "feed": feed_data["feed"]["title"], + "link": entry.link, + "description": entry.description, + "summary": entry.summary, + } + ) + posts.sort(key=operator.itemgetter("published"), reverse=True) + context["blog"]["posts"] = posts[: context["blog"]["num_posts"]] + return context + + @staticmethod + def maintainers_add_info(context): + """ + Given the active maintainers defined in the yaml file, it fetches + the GitHub user information for them. 
+ """ + context["maintainers"]["people"] = [] + for user in context["maintainers"]["active"]: + resp = requests.get(f"https://api.github.com/users/{user}") + if context["ignore_io_errors"] and resp.status_code == 403: + return context + resp.raise_for_status() + context["maintainers"]["people"].append(resp.json()) + return context + + @staticmethod + def home_add_releases(context): + context["releases"] = [] + + github_repo_url = context["main"]["github_repo_url"] + resp = requests.get(f"https://api.github.com/repos/{github_repo_url}/releases") + if context["ignore_io_errors"] and resp.status_code == 403: + return context + resp.raise_for_status() + + for release in resp.json(): + if release["prerelease"]: + continue + published = datetime.datetime.strptime( + release["published_at"], "%Y-%m-%dT%H:%M:%SZ" + ) + context["releases"].append( + { + "name": release["tag_name"].lstrip("v"), + "tag": release["tag_name"], + "published": published, + "url": ( + release["assets"][0]["browser_download_url"] + if release["assets"] + else "" + ), + } + ) + return context + + +def get_callable(obj_as_str: str) -> object: + """ + Get a Python object from its string representation. + + For example, for ``sys.stdout.write`` would import the module ``sys`` + and return the ``write`` function. + """ + components = obj_as_str.split(".") + attrs = [] + while components: + try: + obj = importlib.import_module(".".join(components)) + except ImportError: + attrs.insert(0, components.pop()) + else: + break + + if not obj: + raise ImportError(f'Could not import "{obj_as_str}"') + + for attr in attrs: + obj = getattr(obj, attr) + + return obj + + +def get_context(config_fname: str, ignore_io_errors: bool, **kwargs): + """ + Load the config yaml as the base context, and enrich it with the + information added by the context preprocessors defined in the file. 
+ """ + with open(config_fname) as f: + context = yaml.safe_load(f) + + context["ignore_io_errors"] = ignore_io_errors + context.update(kwargs) + + preprocessors = ( + get_callable(context_prep) + for context_prep in context["main"]["context_preprocessors"] + ) + for preprocessor in preprocessors: + context = preprocessor(context) + msg = f"{preprocessor.__name__} is missing the return statement" + assert context is not None, msg + + return context + + +def get_source_files(source_path: str) -> typing.Generator[str, None, None]: + """ + Generate the list of files present in the source directory. + """ + for root, dirs, fnames in os.walk(source_path): + root = os.path.relpath(root, source_path) + for fname in fnames: + yield os.path.join(root, fname) + + +def extend_base_template(content: str, base_template: str) -> str: + """ + Wrap document to extend the base template, before it is rendered with + Jinja2. + """ + result = '{% extends "' + base_template + '" %}' + result += "{% block body %}" + result += content + result += "{% endblock %}" + return result + + +def main( + source_path: str, target_path: str, base_url: str, ignore_io_errors: bool +) -> int: + """ + Copy every file in the source directory to the target directory. + + For ``.md`` and ``.html`` files, render them with the context + before copyings them. ``.md`` files are transformed to HTML. 
+ """ + config_fname = os.path.join(source_path, "config.yml") + + shutil.rmtree(target_path, ignore_errors=True) + os.makedirs(target_path, exist_ok=True) + + sys.stderr.write("Generating context...\n") + context = get_context(config_fname, ignore_io_errors, base_url=base_url) + sys.stderr.write("Context generated\n") + + templates_path = os.path.join(source_path, context["main"]["templates_path"]) + jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_path)) + + for fname in get_source_files(source_path): + if os.path.normpath(fname) in context["main"]["ignore"]: + continue + + sys.stderr.write(f"Processing {fname}\n") + dirname = os.path.dirname(fname) + os.makedirs(os.path.join(target_path, dirname), exist_ok=True) + + extension = os.path.splitext(fname)[-1] + if extension in (".html", ".md"): + with open(os.path.join(source_path, fname)) as f: + content = f.read() + if extension == ".md": + body = markdown.markdown( + content, extensions=context["main"]["markdown_extensions"] + ) + content = extend_base_template(body, context["main"]["base_template"]) + content = jinja_env.from_string(content).render(**context) + fname = os.path.splitext(fname)[0] + ".html" + with open(os.path.join(target_path, fname), "w") as f: + f.write(content) + else: + shutil.copy( + os.path.join(source_path, fname), os.path.join(target_path, dirname) + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Documentation builder.") + parser.add_argument( + "source_path", help="path to the source directory (must contain config.yml)" + ) + parser.add_argument( + "--target-path", default="build", help="directory where to write the output" + ) + parser.add_argument( + "--base-url", default="", help="base url where the website is served from" + ) + parser.add_argument( + "--ignore-io-errors", + action="store_true", + help="do not fail if errors happen when fetching " + "data from http sources, and those fail " + "(mostly useful to allow github 
quota errors " + "when running the script locally)", + ) + args = parser.parse_args() + sys.exit( + main(args.source_path, args.target_path, args.base_url, args.ignore_io_errors) + )
The website can be seen here: https://datapythonista.github.io/pandas-web/ It's still pending that the designer polish the styles. And also the content can surely be improved with everybody's feedback. But I think will be useful to merge this once we iterate a bit on people's feedback, so we can start working on integrating the docs. I'm coordinating with the devs of numpy/scipy, scikit-learn and matplotlib to see if we can reshare things implemented here. Ideally I'd like to have the `pysuerga.py` script in a separate repo (that's why it's not named pandas_web.py`), and also the layout and pages that can be shared (the code of conduct, the donate, the blog,...). For now I add everything here for simplicity, and will create a new project and have it as a dependency here if they are interested. CC: @pandas-dev/pandas-core
https://api.github.com/repos/pandas-dev/pandas/pulls/28014
2019-08-19T14:34:12Z
2019-09-18T13:25:08Z
2019-09-18T13:25:08Z
2019-09-18T13:38:56Z
Backport PR #27882 on branch 0.25.x
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index d1324bc759ea1..87e46f97d3157 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -103,7 +103,6 @@ MultiIndex I/O ^^^ - - Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`) - Better error message when a negative header is passed in :func:`pandas.read_csv` (:issue:`27779`) - @@ -160,6 +159,14 @@ Other - - +I/O and LZMA +~~~~~~~~~~~~ + +Some users may unknowingly have an incomplete Python installation, which lacks the `lzma` module from the standard library. In this case, `import pandas` failed due to an `ImportError` (:issue: `27575`). +Pandas will now warn, rather than raising an `ImportError` if the `lzma` module is not present. Any subsequent attempt to use `lzma` methods will raise a `RuntimeError`. +A possible fix for the lack of the `lzma` module is to ensure you have the necessary libraries and then re-install Python. +For example, on MacOS installing Python with `pyenv` may lead to an incomplete Python installation due to unmet system dependencies at compilation time (like `xz`). Compilation will succeed, but Python might fail at run time. The issue can be solved by installing the necessary dependencies and then re-installing Python. + .. 
_whatsnew_0.251.contributors: Contributors diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index cafc31dad3568..6cc9dd22ce7c9 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -2,7 +2,6 @@ # See LICENSE for the license import bz2 import gzip -import lzma import os import sys import time @@ -59,9 +58,12 @@ from pandas.core.arrays import Categorical from pandas.core.dtypes.concat import union_categoricals import pandas.io.common as icom +from pandas.compat import _import_lzma, _get_lzma_file from pandas.errors import (ParserError, DtypeWarning, EmptyDataError, ParserWarning) +lzma = _import_lzma() + # Import CParserError as alias of ParserError for backwards compatibility. # Ultimately, we want to remove this import. See gh-12665 and gh-14479. CParserError = ParserError @@ -645,9 +647,9 @@ cdef class TextReader: 'zip file %s', str(zip_names)) elif self.compression == 'xz': if isinstance(source, str): - source = lzma.LZMAFile(source, 'rb') + source = _get_lzma_file(lzma)(source, 'rb') else: - source = lzma.LZMAFile(filename=source) + source = _get_lzma_file(lzma)(filename=source) else: raise ValueError('Unrecognized compression type: %s' % self.compression) diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 5ecd641fc68be..b32da8da3a1fb 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -10,6 +10,7 @@ import platform import struct import sys +import warnings PY35 = sys.version_info[:2] == (3, 5) PY36 = sys.version_info >= (3, 6) @@ -65,3 +66,32 @@ def is_platform_mac(): def is_platform_32bit(): return struct.calcsize("P") * 8 < 64 + + +def _import_lzma(): + """Attempts to import lzma, warning the user when lzma is not available. + """ + try: + import lzma + + return lzma + except ImportError: + msg = ( + "Could not import the lzma module. " + "Your installed Python is incomplete. " + "Attempting to use lzma compression will result in a RuntimeError." 
+ ) + warnings.warn(msg) + + +def _get_lzma_file(lzma): + """Returns the lzma method LZMAFile when the module was correctly imported. + Otherwise, raises a RuntimeError. + """ + if lzma is None: + raise RuntimeError( + "lzma module not available. " + "A Python re-install with the proper " + "dependencies might be required to solve this issue." + ) + return lzma.LZMAFile diff --git a/pandas/io/common.py b/pandas/io/common.py index 9a9620e2d0663..57e710fdfc2ec 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -6,7 +6,6 @@ import gzip from http.client import HTTPException # noqa from io import BytesIO -import lzma import mmap import os import pathlib @@ -22,6 +21,7 @@ from urllib.request import pathname2url, urlopen import zipfile +from pandas.compat import _get_lzma_file, _import_lzma from pandas.errors import ( # noqa AbstractMethodError, DtypeWarning, @@ -32,6 +32,8 @@ from pandas.core.dtypes.common import is_file_like +lzma = _import_lzma() + # gh-12665: Alias for now and remove later. 
CParserError = ParserError @@ -382,7 +384,7 @@ def _get_handle( # XZ Compression elif compression == "xz": - f = lzma.LZMAFile(path_or_buf, mode) + f = _get_lzma_file(lzma)(path_or_buf, mode) # Unrecognized Compression else: diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index ce459ab24afe0..16ca1109f266c 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -1,5 +1,7 @@ import contextlib import os +import subprocess +import textwrap import warnings import pytest @@ -125,3 +127,33 @@ def test_compression_warning(compression_only): with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False): with f: df.to_csv(f, compression=compression_only) + + +def test_with_missing_lzma(): + """Tests if import pandas works when lzma is not present.""" + # https://github.com/pandas-dev/pandas/issues/27575 + code = textwrap.dedent( + """\ + import sys + sys.modules['lzma'] = None + import pandas + """ + ) + subprocess.check_output(["python", "-c", code]) + + +def test_with_missing_lzma_runtime(): + """Tests if RuntimeError is hit when calling lzma without + having the module available.""" + code = textwrap.dedent( + """ + import sys + import pytest + sys.modules['lzma'] = None + import pandas + df = pandas.DataFrame() + with pytest.raises(RuntimeError, match='lzma module'): + df.to_csv('foo.csv', compression='xz') + """ + ) + subprocess.check_output(["python", "-c", code]) diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 076d0c9f947c7..30555508f0998 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -13,7 +13,6 @@ import bz2 import glob import gzip -import lzma import os import pickle import shutil @@ -22,7 +21,7 @@ import pytest -from pandas.compat import is_platform_little_endian +from pandas.compat import _get_lzma_file, _import_lzma, is_platform_little_endian import pandas as pd from pandas import Index @@ -30,6 +29,8 @@ 
from pandas.tseries.offsets import Day, MonthEnd +lzma = _import_lzma() + @pytest.fixture(scope="module") def current_pickle_data(): @@ -270,7 +271,7 @@ def compress_file(self, src_path, dest_path, compression): with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_DEFLATED) as f: f.write(src_path, os.path.basename(src_path)) elif compression == "xz": - f = lzma.LZMAFile(dest_path, "w") + f = _get_lzma_file(lzma)(dest_path, "w") else: msg = "Unrecognized compression type: {}".format(compression) raise ValueError(msg) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index cf8452cdd0c59..a8f0d0da52e1f 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -5,7 +5,6 @@ from functools import wraps import gzip import http.client -import lzma import os import re from shutil import rmtree @@ -26,7 +25,7 @@ ) import pandas._libs.testing as _testing -from pandas.compat import raise_with_traceback +from pandas.compat import _get_lzma_file, _import_lzma, raise_with_traceback from pandas.core.dtypes.common import ( is_bool, @@ -70,6 +69,8 @@ from pandas.io.common import urlopen from pandas.io.formats.printing import pprint_thing +lzma = _import_lzma() + N = 30 K = 4 _RAISE_NETWORK_ERROR_DEFAULT = False @@ -211,7 +212,7 @@ def decompress_file(path, compression): elif compression == "bz2": f = bz2.BZ2File(path, "rb") elif compression == "xz": - f = lzma.LZMAFile(path, "rb") + f = _get_lzma_file(lzma)(path, "rb") elif compression == "zip": zip_file = zipfile.ZipFile(path) zip_names = zip_file.namelist() @@ -264,9 +265,7 @@ def write_to_compressed(compression, path, data, dest="test"): compress_method = bz2.BZ2File elif compression == "xz": - import lzma - - compress_method = lzma.LZMAFile + compress_method = _get_lzma_file(lzma) else: msg = "Unrecognized compression type: {}".format(compression) raise ValueError(msg)
https://api.github.com/repos/pandas-dev/pandas/pulls/28012
2019-08-19T13:11:42Z
2019-08-19T14:45:17Z
2019-08-19T14:45:17Z
2019-08-19T14:45:21Z
BUG: Avoid duplicating entire exploded column when joining back with origi…
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 5b9e3a7dbad06..f00393e2e1ea1 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -169,6 +169,7 @@ Indexing ^^^^^^^^ - Bug in assignment using a reverse slicer (:issue:`26939`) +- Bug in :meth:`DataFrame.explode` would duplicate frame in the presence of duplicates in the index (:issue:`28010`) - Bug in reindexing a :meth:`PeriodIndex` with another type of index that contained a `Period` (:issue:`28323`) (:issue:`28337`) - Fix assignment of column via `.loc` with numpy non-ns datetime type (:issue:`27395`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f1ed3a125f60c..d3656f881a886 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6267,12 +6267,13 @@ def explode(self, column: Union[str, Tuple]) -> "DataFrame": if not self.columns.is_unique: raise ValueError("columns must be unique") - result = self[column].explode() - return ( - self.drop([column], axis=1) - .join(result) - .reindex(columns=self.columns, copy=False) - ) + df = self.reset_index(drop=True) + result = df[column].explode() + result = df.drop([column], axis=1).join(result) + result.index = self.index.take(result.index) + result = result.reindex(columns=self.columns, copy=False) + + return result def unstack(self, level=-1, fill_value=None): """ diff --git a/pandas/tests/frame/test_explode.py b/pandas/tests/frame/test_explode.py index b4330aadbfba3..c07de35f8bf34 100644 --- a/pandas/tests/frame/test_explode.py +++ b/pandas/tests/frame/test_explode.py @@ -118,3 +118,47 @@ def test_usecase(): index=[0, 0, 1, 1], ) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "input_dict, input_index, expected_dict, expected_index", + [ + ( + {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]}, + [0, 0], + {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]}, + [0, 0, 0, 0], + ), + ( + {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]}, + 
pd.Index([0, 0], name="my_index"), + {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]}, + pd.Index([0, 0, 0, 0], name="my_index"), + ), + ( + {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]}, + pd.MultiIndex.from_arrays( + [[0, 0], [1, 1]], names=["my_first_index", "my_second_index"] + ), + {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]}, + pd.MultiIndex.from_arrays( + [[0, 0, 0, 0], [1, 1, 1, 1]], + names=["my_first_index", "my_second_index"], + ), + ), + ( + {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]}, + pd.MultiIndex.from_arrays([[0, 0], [1, 1]], names=["my_index", None]), + {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]}, + pd.MultiIndex.from_arrays( + [[0, 0, 0, 0], [1, 1, 1, 1]], names=["my_index", None] + ), + ), + ], +) +def test_duplicate_index(input_dict, input_index, expected_dict, expected_index): + # GH 28005 + df = pd.DataFrame(input_dict, index=input_index) + result = df.explode("col1") + expected = pd.DataFrame(expected_dict, index=expected_index, dtype=object) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/test_explode.py b/pandas/tests/series/test_explode.py index 331546f7dc73d..e4974bd0af145 100644 --- a/pandas/tests/series/test_explode.py +++ b/pandas/tests/series/test_explode.py @@ -111,3 +111,11 @@ def test_nested_EA(): pd.date_range("20170101", periods=6, tz="UTC"), index=[0, 0, 0, 1, 1, 1] ) tm.assert_series_equal(result, expected) + + +def test_duplicate_index(): + # GH 28005 + s = pd.Series([[1, 2], [3, 4]], index=[0, 0]) + result = s.explode() + expected = pd.Series([1, 2, 3, 4], index=[0, 0, 0, 0], dtype=object) + tm.assert_series_equal(result, expected)
…nal frame - [x] closes #28005 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28010
2019-08-19T12:44:12Z
2019-09-17T13:38:14Z
2019-09-17T13:38:14Z
2019-09-17T13:43:38Z
DOC: Corrected file description in read_fwf()
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 7ba103c5ff996..1d49dbdee9c03 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -28,7 +28,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like :delim: ; text;`CSV <https://en.wikipedia.org/wiki/Comma-separated_values>`__;:ref:`read_csv<io.read_csv_table>`;:ref:`to_csv<io.store_in_csv>` - text;`TXT <https://www.oracle.com/webfolder/technetwork/data-quality/edqhelp/Content/introduction/getting_started/configuring_fixed_width_text_file_formats.htm>`__;:ref:`read_fwf<io.fwf_reader>` + text;Fixed-Width Text File;:ref:`read_fwf<io.fwf_reader>` text;`JSON <https://www.json.org/>`__;:ref:`read_json<io.json_reader>`;:ref:`to_json<io.json_writer>` text;`HTML <https://en.wikipedia.org/wiki/HTML>`__;:ref:`read_html<io.read_html>`;:ref:`to_html<io.html>` text; Local clipboard;:ref:`read_clipboard<io.clipboard>`;:ref:`to_clipboard<io.clipboard>`
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28009
2019-08-19T12:16:53Z
2019-08-20T18:40:21Z
2019-08-20T18:40:21Z
2019-08-20T18:40:25Z
[CI] Add pip dependence explicitly
diff --git a/ci/deps/azure-macos-35.yaml b/ci/deps/azure-macos-35.yaml index cb2ac08cbf758..39315b15a018b 100644 --- a/ci/deps/azure-macos-35.yaml +++ b/ci/deps/azure-macos-35.yaml @@ -22,6 +22,7 @@ dependencies: - xlrd - xlsxwriter - xlwt + - pip - pip: - pyreadstat # universal
Close #27374. Get rid of the conda warning.
https://api.github.com/repos/pandas-dev/pandas/pulls/28008
2019-08-19T06:14:58Z
2019-08-21T14:21:45Z
2019-08-21T14:21:45Z
2019-08-21T14:21:49Z
DOC: Improve melt example (#23844)
diff --git a/doc/source/_static/reshaping_melt.png b/doc/source/_static/reshaping_melt.png index d0c4e77655e60..26daff22640b7 100644 Binary files a/doc/source/_static/reshaping_melt.png and b/doc/source/_static/reshaping_melt.png differ diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst index f118fe84d523a..6d61523302f08 100644 --- a/doc/source/user_guide/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -292,11 +292,11 @@ For instance, cheese = pd.DataFrame({'first': ['John', 'Mary'], 'last': ['Doe', 'Bo'], - 'height': [5.5, 6.0], - 'weight': [130, 150]}) + 'A': [5.5, 6.0], + 'B': [7, 9]}) cheese cheese.melt(id_vars=['first', 'last']) - cheese.melt(id_vars=['first', 'last'], var_name='quantity') + cheese.melt(id_vars=['first', 'last'], var_name='treatment') Another way to transform is to use the :func:`~pandas.wide_to_long` panel data convenience function. It is less flexible than :func:`~pandas.melt`, but more
- [x] closes #23844
https://api.github.com/repos/pandas-dev/pandas/pulls/28006
2019-08-18T23:30:19Z
2019-08-27T08:13:19Z
null
2019-08-27T08:13:19Z
Hack fix for BUG - GH24784
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index f3fcb090e9883..a5ff25825009c 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -270,6 +270,13 @@ def _get_index_freq(data): weekdays = np.unique(data.index.dayofweek) if (5 in weekdays) or (6 in weekdays): freq = None + # This is a hack introduced to avoid tick resolution adjustment issue - + # Locators off by one day #24784 + elif freq == "D": + freq = None + elif freq.name == "D": + freq = None + # hack logic end - Locators off by one day #24784 return freq diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index 5a591f72d7361..dae126350df6d 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -556,6 +556,35 @@ def _check_plot_works(f, filterwarnings="always", **kwargs): return ret +def _check_plot_works_with_continuous_dates(f, filterwarnings="always", **kwargs): + ''' first version of the test for - BUG 24784''' + import matplotlib.pyplot as plt + import matplotlib.dates as mdates + + ret = None + with warnings.catch_warnings(): + warnings.simplefilter(filterwarnings) + try: + try: + fig = kwargs["figure"] + except KeyError: + fig = plt.gcf() + + plt.clf() + + ax = kwargs.get("ax", fig.add_subplot(211)) # noqa + ret = f(**kwargs) + ret.xaxis.set_major_locator(mdates.DayLocator()) + ret.xaxis.set_major_formatter(mdates.DateFormatter("\n%b%d")) + + assert_is_valid_plot_return_object(ret) + + with ensure_clean(return_filelike=True) as path: + plt.savefig(path) + finally: + tm.close(fig) + + return ret def curpath(): pth, _ = os.path.split(os.path.abspath(__file__)) diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 111c3a70fc09c..7ee0172b2c924 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -14,7 +14,7 @@ import pandas as pd from pandas import DataFrame, 
Series, date_range -from pandas.tests.plotting.common import TestPlotBase, _check_plot_works +from pandas.tests.plotting.common import TestPlotBase, _check_plot_works, _check_plot_works_with_continuous_dates import pandas.util.testing as tm import pandas.plotting as plotting @@ -731,6 +731,28 @@ def test_dup_datetime_index_plot(self): s = Series(values, index=index) _check_plot_works(s.plot) + @pytest.mark.slow + def test_continuous_dates(self): + s = Series(np.arange(10), name="x") + s_err = np.random.randn(10) + + ix = date_range("1/1/2018", "1/10/2018", freq="D") + ts = Series(np.arange(10), index=ix, name="x") + ts_err = Series(np.random.randn(10), index=ix) + td_err = DataFrame(randn(10, 2), index=ix, columns=["x", "y"]) + ax = _check_plot_works_with_continuous_dates(ts.plot, yerr=ts_err) + + self._check_text_labels(ax.get_xticklabels(), ["\nJan01", "\nJan02", "\nJan03", "\nJan04", "\nJan05", + "\nJan06", "\nJan07", "\nJan08", "\nJan09", "\nJan10"]) + + # check incorrect lengths and types + with pytest.raises(ValueError): + s.plot(yerr=np.arange(11)) + + s_err = ["zzz"] * 10 + with pytest.raises(TypeError): + s.plot(yerr=s_err) + @pytest.mark.slow def test_errorbar_plot(self):
- [X] closes #24784 - [X] tests added / passed - 1 - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry - Test utility method "_check_plot_works_with_continuous_dates" and Test case "test_continuous_dates"
https://api.github.com/repos/pandas-dev/pandas/pulls/28003
2019-08-18T17:52:20Z
2019-10-03T19:11:36Z
null
2019-10-03T19:11:42Z
TST 27905 - testing groupby.transform('sum')
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index d3972e6ba9008..f5eb58112fda3 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -1074,3 +1074,34 @@ def test_transform_lambda_with_datetimetz(): name="time", ) assert_series_equal(result, expected) + + +# @pytest.mark.parametrize( +# "input_df, expected_df", +# [ +# ( +# DataFrame( +# { +# "A": [121, 121, 121, 121, 231, 231, 676], +# "B": [1, 2, np.nan, 3, 3, np.nan, 4], +# } +# ), +# 1, +# ), +# (DataFrame({"B": [6.0, 6.0, 6.0, 6.0, 3.0, 3.0, 4.0]}), 1), +# ], +# ) +# def test_groupby_transform_sum(input_df, expected_df): +# # GH 27905 - Test sum in groupby.transform +# df_transform = input_df.groupby("A")["B"].transform("sum") +# df_transform = df_transform.to_frame() +# assert all(df_transform == expected_df) + + +def test_groupby_transform_sum(): + input_df = DataFrame( + {"A": [121, 121, 121, 121, 231, 231, 676], "B": [1, 2, np.nan, 3, 3, np.nan, 4]} + ) + expected = Series([6.0, 6.0, 6.0, 6.0, 3.0, 3.0, 4.0]) + result = input_df.groupby("A")["B"].transform("sum") + assert_series_equal(result, expected)
- [x] closes #27905 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28002
2019-08-18T17:44:38Z
2019-09-08T16:05:44Z
null
2019-09-08T16:05:44Z
DOC: Fixes to docstrings
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ba1c516b9b444..90779baea32cb 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1875,7 +1875,7 @@ def __iter__(self): # can we get a better explanation of this? def keys(self): """ - Get the 'info axis' (see Indexing for more) + Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index d38221d784273..76c01535a26e7 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -9,8 +9,7 @@ def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover r""" - Read text from clipboard and pass to read_csv. See read_csv for the - full argument list + Read text from clipboard and pass to read_csv. Parameters ---------- @@ -18,9 +17,13 @@ def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover A string or regex delimiter. The default of '\s+' denotes one or more whitespace characters. + **kwargs + See read_csv for the full argument list. + Returns ------- - parsed : DataFrame + DataFrame + A parsed DataFrame object. """ encoding = kwargs.pop("encoding", "utf-8") diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 154656fbb250b..997edf49d9e8f 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -837,10 +837,10 @@ def parse( **kwds ): """ - Parse specified sheet(s) into a DataFrame + Parse specified sheet(s) into a DataFrame. Equivalent to read_excel(ExcelFile, ...) See the read_excel - docstring for more info on accepted parameters + docstring for more info on accepted parameters. 
Returns ------- diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index a3ff837bc7f52..4025e6d64a859 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -579,6 +579,7 @@ def parser_f( escapechar=None, comment=None, encoding=None, + auto_encode=True, dialect=None, # Error Handling error_bad_lines=True, @@ -663,6 +664,7 @@ def parser_f( usecols=usecols, verbose=verbose, encoding=encoding, + auto_encode=auto_encode, squeeze=squeeze, memory_map=memory_map, float_precision=float_precision, diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 6af5dd6f1bf37..057672f6f1be8 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -431,8 +431,9 @@ def _is_metadata_of(group, parent_group): class HDFStore: """ - Dict-like IO interface for storing pandas objects in PyTables - either Fixed or Table format. + Dict-like IO interface for storing pandas objects in PyTables. + + Either Fixed or Table format. Parameters ---------- @@ -564,13 +565,12 @@ def __exit__(self, exc_type, exc_value, traceback): def keys(self): """ - Return a (potentially unordered) list of the keys corresponding to the - objects stored in the HDFStore. These are ABSOLUTE path-names (e.g. - have the leading '/' + Return a list of keys corresponding to objects stored in HDFStore. Returns ------- list + List of ABSOLUTE path-names (e.g. have the leading '/'). """ return [n._v_pathname for n in self.groups()] @@ -703,7 +703,7 @@ def flush(self, fsync=False): def get(self, key): """ - Retrieve pandas object stored in file + Retrieve pandas object stored in file. Parameters ---------- @@ -711,7 +711,8 @@ def get(self, key): Returns ------- - obj : same type as object stored in file + object + Same type as object stored in file. 
""" group = self.get_node(key) if group is None: @@ -731,25 +732,31 @@ def select( **kwargs ): """ - Retrieve pandas object stored in file, optionally based on where - criteria + Retrieve pandas object stored in file, optionally based on where criteria. Parameters ---------- key : object - where : list of Term (or convertible) objects, optional - start : integer (defaults to None), row number to start selection - stop : integer (defaults to None), row number to stop selection - columns : a list of columns that if not None, will limit the return - columns - iterator : boolean, return an iterator, default False - chunksize : nrows to include in iteration, return an iterator - auto_close : boolean, should automatically close the store when - finished, default is False + Object being retrieved from file. + where : list, default None + List of Term (or convertible) objects, optional. + start : int, default None + Row number to start selection. + stop : int, default None + Row number to stop selection. + columns : list, default None + A list of columns that if not None, will limit the return columns. + iterator : bool, default False + Returns an iterator. + chunksize : int, default None + Number or rows to include in iteration, return an iterator. + auto_close : bool, default False + Should automatically close the store when finished. Returns ------- - The selected object + object + Retrieved object from file. """ group = self.get_node(key) if group is None: @@ -929,28 +936,30 @@ def func(_start, _stop, _where): def put(self, key, value, format=None, append=False, **kwargs): """ - Store object in HDFStore + Store object in HDFStore. Parameters ---------- - key : object - value : {Series, DataFrame} - format : 'fixed(f)|table(t)', default is 'fixed' + key : object + value : {Series, DataFrame} + format : 'fixed(f)|table(t)', default is 'fixed' fixed(f) : Fixed format - Fast writing/reading. Not-appendable, nor searchable + Fast writing/reading. 
Not-appendable, nor searchable. table(t) : Table format Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching - / selecting subsets of the data - append : boolean, default False + / selecting subsets of the data. + append : bool, default False This will force Table format, append the input data to the existing. - data_columns : list of columns to create as data columns, or True to + data_columns : list, default None + List of columns to create as data columns, or True to use all columns. See `here <http://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__. - encoding : default None, provide an encoding for strings - dropna : boolean, default False, do not write an ALL nan row to - the store settable by the option 'io.hdf.dropna_table' + encoding : str, default None + Provide an encoding for strings. + dropna : bool, default False, do not write an ALL nan row to + The store settable by the option 'io.hdf.dropna_table'. """ if format is None: format = get_option("io.hdf.default_format") or "fixed" @@ -1165,12 +1174,13 @@ def create_table_index(self, key, **kwargs): s.create_index(**kwargs) def groups(self): - """return a list of all the top-level nodes (that are not themselves a - pandas storage object) + """ + Return a list of all the top-level nodes (that are not themselves a pandas storage object). Returns ------- list + List of objects. """ _tables() self._check_if_open() @@ -1188,10 +1198,12 @@ def groups(self): ] def walk(self, where="/"): - """ Walk the pytables group hierarchy for pandas objects + """ + Walk the pytables group hierarchy for pandas objects. This generator will yield the group path, subgroups and pandas object names for each group. + Any non-pandas PyTables objects that are not a group will be ignored. 
The `where` group itself is listed first (preorder), then each of its @@ -1202,18 +1214,17 @@ def walk(self, where="/"): Parameters ---------- - where : str, optional + where : str, default "/" Group where to start walking. - If not supplied, the root group is used. Yields ------ path : str - Full path to a group (without trailing '/') - groups : list of str - names of the groups contained in `path` - leaves : list of str - names of the pandas objects contained in `path` + Full path to a group (without trailing '/'). + groups : list + Names (strings) of the groups contained in `path`. + leaves : list + Names (strings) of the pandas objects contained in `path`. """ _tables() self._check_if_open() diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index a208d5ad2fea9..156a3a1967cf5 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -204,8 +204,7 @@ def __add__(date): normalize : bool, default False Whether to round the result of a DateOffset addition down to the previous midnight. - **kwds - Temporal parameter that add to or replace the offset value. + **kwds : Temporal parameter that add to or replace the offset value. Parameters that **add** to the offset (like Timedelta): @@ -231,18 +230,25 @@ def __add__(date): - microsecond - nanosecond + . + See Also -------- - dateutil.relativedelta.relativedelta + dateutil.relativedelta.relativedelta : The relativedelta type is designed + + to be applied to an existing datetime an can replace specific components of + + that datetime, or represents an interval of time. Examples -------- + >>> from pandas.tseries.offsets import DateOffset >>> ts = pd.Timestamp('2017-01-01 09:10:11') >>> ts + DateOffset(months=3) Timestamp('2017-04-01 09:10:11') >>> ts = pd.Timestamp('2017-01-01 09:10:11') - >>> ts + DateOffset(month=3) + >>> ts + DateOffset(months=2) Timestamp('2017-03-01 09:10:11') """
- [X] xref #27979 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Partially resolves issue
https://api.github.com/repos/pandas-dev/pandas/pulls/28001
2019-08-18T17:43:39Z
2019-08-22T17:03:11Z
null
2019-08-22T17:05:37Z
When a DataFrame is merged with an empty dataframe, it was creating an incorrect index for the resulting dataframe. Fix for it
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 603a615c1f8cb..31fe00b35705d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -10,6 +10,8 @@ """ import collections from collections import OrderedDict, abc +import pandas +from pandas import Series import functools from io import StringIO import itertools @@ -7187,21 +7189,30 @@ def _join_compat( other = DataFrame({other.name: other}) if isinstance(other, DataFrame): - return merge( - self, - other, - left_on=on, - how=how, - left_index=on is None, - right_index=True, - suffixes=(lsuffix, rsuffix), - sort=sort, - ) - else: - if on is not None: - raise ValueError( - "Joining multiple DataFrames only supported for joining on index" + if on is None: + return merge( + self, + other, + left_on=on, + how=how, + left_index=True, + right_index=True, + suffixes=(lsuffix, rsuffix), + sort=sort ) + else: + result = merge( + self, + other, + left_on=on, + how=how, + left_index=False, + right_index=True, + suffixes=(lsuffix, rsuffix), + sort=sort + ) + result.set_index([pandas.Series([i for i in range(len(result))])], inplace=True) + return result frames = [self] + list(other)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry In the current fix, if any dataframe is empty, a index is set explicitly using the set_index function
https://api.github.com/repos/pandas-dev/pandas/pulls/28000
2019-08-18T17:32:14Z
2019-08-27T22:20:20Z
null
2019-08-27T22:20:20Z
ENH: Add support for dataclasses in the DataFrame constructor
diff --git a/doc/source/user_guide/dsintro.rst b/doc/source/user_guide/dsintro.rst index 200d567a62732..d7f7690f8c3d0 100644 --- a/doc/source/user_guide/dsintro.rst +++ b/doc/source/user_guide/dsintro.rst @@ -397,6 +397,28 @@ The result will be a DataFrame with the same index as the input Series, and with one column whose name is the original name of the Series (only if no other column name provided). +.. _basics.dataframe.from_list_dataclasses: + +From a list of dataclasses +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. versionadded:: 1.1.0 + +Data Classes as introduced in `PEP557 <https://www.python.org/dev/peps/pep-0557>`__, +can be passed into the DataFrame constructor. +Passing a list of dataclasses is equivilent to passing a list of dictionaries. + +Please be aware, that that all values in the list should be dataclasses, mixing +types in the list would result in a TypeError. + +.. ipython:: python + + from dataclasses import make_dataclass + + Point = make_dataclass("Point", [("x", int), ("y", int)]) + + pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) + **Missing data** Much more will be said on this topic in the :ref:`Missing data <missing_data>` diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 1afe7edf2641b..f5997a13e785d 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -24,6 +24,7 @@ is_array_like, is_bool, is_complex, + is_dataclass, is_decimal, is_dict_like, is_file_like, diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index 56b880dca1241..d1607b5ede6c3 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -386,3 +386,39 @@ def is_sequence(obj) -> bool: return not isinstance(obj, (str, bytes)) except (TypeError, AttributeError): return False + + +def is_dataclass(item): + """ + Checks if the object is a data-class instance + + Parameters + ---------- + item : object + + Returns + -------- + is_dataclass : bool + True if the item is an 
instance of a data-class, + will return false if you pass the data class itself + + Examples + -------- + >>> from dataclasses import dataclass + >>> @dataclass + ... class Point: + ... x: int + ... y: int + + >>> is_dataclass(Point) + False + >>> is_dataclass(Point(0,2)) + True + + """ + try: + from dataclasses import is_dataclass + + return is_dataclass(item) and not isinstance(item, type) + except ImportError: + return False diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 72d9ef7d0d35f..9b140238a9389 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -77,6 +77,7 @@ ensure_platform_int, infer_dtype_from_object, is_bool_dtype, + is_dataclass, is_datetime64_any_dtype, is_dict_like, is_dtype_equal, @@ -117,6 +118,7 @@ from pandas.core.internals import BlockManager from pandas.core.internals.construction import ( arrays_to_mgr, + dataclasses_to_dicts, get_names_from_index, init_dict, init_ndarray, @@ -474,6 +476,8 @@ def __init__( if not isinstance(data, (abc.Sequence, ExtensionArray)): data = list(data) if len(data) > 0: + if is_dataclass(data[0]): + data = dataclasses_to_dicts(data) if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1: if is_named_tuple(data[0]) and columns is None: columns = data[0]._fields diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index ab363e10eb098..c4416472d451c 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -429,6 +429,33 @@ def _get_axes(N, K, index, columns): return index, columns +def dataclasses_to_dicts(data): + """ Converts a list of dataclass instances to a list of dictionaries + + Parameters + ---------- + data : List[Type[dataclass]] + + Returns + -------- + list_dict : List[dict] + + Examples + -------- + >>> @dataclass + >>> class Point: + ... x: int + ... 
y: int + + >>> dataclasses_to_dicts([Point(1,2), Point(2,3)]) + [{"x":1,"y":2},{"x":2,"y":3}] + + """ + from dataclasses import asdict + + return list(map(asdict, data)) + + # --------------------------------------------------------------------- # Conversion of Inputs to Arrays diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index d938c0f6f1066..058b706cfe3aa 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -9,7 +9,7 @@ import pytest import pytz -from pandas.compat import is_platform_little_endian +from pandas.compat import PY37, is_platform_little_endian from pandas.compat.numpy import _is_numpy_dev from pandas.core.dtypes.common import is_integer_dtype @@ -1364,6 +1364,46 @@ def test_constructor_list_of_namedtuples(self): result = DataFrame(tuples, columns=["y", "z"]) tm.assert_frame_equal(result, expected) + @pytest.mark.skipif(not PY37, reason="Requires Python >= 3.7") + def test_constructor_list_of_dataclasses(self): + # GH21910 + from dataclasses import make_dataclass + + Point = make_dataclass("Point", [("x", int), ("y", int)]) + + datas = [Point(0, 3), Point(1, 3)] + expected = DataFrame({"x": [0, 1], "y": [3, 3]}) + result = DataFrame(datas) + tm.assert_frame_equal(result, expected) + + @pytest.mark.skipif(not PY37, reason="Requires Python >= 3.7") + def test_constructor_list_of_dataclasses_with_varying_types(self): + # GH21910 + from dataclasses import make_dataclass + + # varying types + Point = make_dataclass("Point", [("x", int), ("y", int)]) + HLine = make_dataclass("HLine", [("x0", int), ("x1", int), ("y", int)]) + + datas = [Point(0, 3), HLine(1, 3, 3)] + + expected = DataFrame( + {"x": [0, np.nan], "y": [3, 3], "x0": [np.nan, 1], "x1": [np.nan, 3]} + ) + result = DataFrame(datas) + tm.assert_frame_equal(result, expected) + + @pytest.mark.skipif(not PY37, reason="Requires Python >= 3.7") + def test_constructor_list_of_dataclasses_error_thrown(self): + 
# GH21910 + from dataclasses import make_dataclass + + Point = make_dataclass("Point", [("x", int), ("y", int)]) + + # expect TypeError + with pytest.raises(TypeError): + DataFrame([Point(0, 0), {"x": 1, "y": 0}]) + def test_constructor_list_of_dict_order(self): # GH10056 data = [
Added support for data-classes when used in the construction of a new dataframe. - [X] closes #21910 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Simply put, added support to use dataclasses in the following way: ```python from dataclasses import dataclass @dataclass class Person: name: str age: int df = DataFrame([Person("me", 25), Person("you", 35)]) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/27999
2019-08-18T16:54:11Z
2020-03-15T01:09:41Z
2020-03-15T01:09:41Z
2020-04-09T23:34:29Z
Raise exception in read_csv when prefix is set, but not used because a header exists
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index a3ff837bc7f52..233fafa918722 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1930,6 +1930,8 @@ def __init__(self, src, **kwds): ] else: self.names = list(range(self._reader.table_width)) + elif self.prefix: + raise ValueError("argument prefix must be None if no headers provided") # gh-9755 #
Raised an error if argument prefix is set when there are headers present during pandas read_csv. - [x] closes #27394 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27998
2019-08-18T16:52:28Z
2019-09-17T15:37:22Z
null
2019-09-17T15:37:23Z
New branch
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 038447ad252fe..096dff0bce309 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -463,6 +463,7 @@ def pad_inplace(algos_t[:] values, continue fill_count += 1 values[i] = val + else: fill_count = 0 val = values[i] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 603a615c1f8cb..fe2144f7464bb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4186,14 +4186,16 @@ def fillna( @Appender(_shared_docs["replace"] % _shared_doc_kwargs) def replace( - self, - to_replace=None, - value=None, - inplace=False, - limit=None, - regex=False, - method="pad", + self, + to_replace=None, + value=None, + inplace=False, + limit=None, + regex=False, + method="pad", + replace_by_none=False ): + return super().replace( to_replace=to_replace, value=value, @@ -4201,6 +4203,7 @@ def replace( limit=limit, regex=regex, method=method, + replace_by_none=replace_by_none ) @Appender(_shared_docs["shift"] % _shared_doc_kwargs) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ba1c516b9b444..aa7b5dd0247d0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -107,14 +107,16 @@ def _single_replace(self, to_replace, method, inplace, limit): result = self if inplace else self.copy() fill_f = missing.get_fill_func(method) + mask = missing.mask_missing(result.values, to_replace) - values = fill_f(result.values, limit=limit, mask=mask) + values = fill_f(result.values, limit=limit, mask=mask) if values.dtype == orig_dtype and inplace: return result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self) + if inplace: self._update_inplace(result._data) return @@ -6380,6 +6382,8 @@ def bfill(self, axis=None, inplace=False, limit=None, downcast=None): .. versionchanged:: 0.23.0 Added to DataFrame. 
+ replace_by_none : bool, default False + If True, replace to_replace by None Returns ------- @@ -6585,28 +6589,97 @@ def bfill(self, axis=None, inplace=False, limit=None, downcast=None): dtype: object """ + @Appender(_shared_docs["replace"] % _shared_doc_kwargs) def replace( - self, - to_replace=None, - value=None, - inplace=False, - limit=None, - regex=False, - method="pad", + self, + to_replace=None, + value=None, + inplace=False, + limit=None, + regex=False, + method="pad", + replace_by_none=False ): + + inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: + print('if not is_bool(regex) and to_replace is not None') raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool") self._consolidate_inplace() + if value is None and is_dict_like(to_replace): + if not is_dict_like(to_replace) and not is_dict_like(regex): + to_replace = [to_replace] + + if isinstance(to_replace, (tuple, list)): + + if isinstance(self, ABCDataFrame): + + return self.apply( + _single_replace, args=(to_replace, method, inplace, limit) + ) + + return _single_replace(self, to_replace, method, inplace, limit) + + if not is_dict_like(to_replace): + + if not is_dict_like(regex): + raise TypeError( + 'If "to_replace" and "value" are both None' + ' and "to_replace" is not a list, then ' + "regex must be a mapping" + ) + to_replace = regex + regex = True + + items = list(to_replace.items()) + keys, values = zip(*items) if items else ([], []) + + are_mappings = [is_dict_like(v) for v in values] + + if any(are_mappings): + + if not all(are_mappings): + raise TypeError( + "If a nested mapping is passed, all values" + " of the top level mapping must be " + "mappings" + ) + # passed a nested dict/Series + to_rep_dict = {} + value_dict = {} + + for k, v in items: + keys, values = list(zip(*v.items())) or ([], []) + if set(keys) & set(values): + raise ValueError( + "Replacement not allowed with " + "overlapping keys and values" + ) + 
to_rep_dict[k] = list(keys) + value_dict[k] = list(values) + + to_replace, value = to_rep_dict, value_dict + else: + + to_replace, value = keys, values + + return self.replace( + to_replace, value, inplace=inplace, limit=limit, regex=regex + ) + + if value is None and not replace_by_none: - if value is None: # passing a single value that is scalar like # when value is None (GH5319), for compat + if not is_dict_like(to_replace) and not is_dict_like(regex): + to_replace = [to_replace] + if isinstance(to_replace, (tuple, list)): if isinstance(self, ABCDataFrame): return self.apply( @@ -6615,6 +6688,7 @@ def replace( return _single_replace(self, to_replace, method, inplace, limit) if not is_dict_like(to_replace): + if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None' @@ -6664,7 +6738,9 @@ def replace( return self new_data = self._data + if is_dict_like(to_replace): + if is_dict_like(value): # {'A' : NA} -> {'A' : 0} res = self if inplace else self.copy() for c, src in to_replace.items(): @@ -6681,6 +6757,7 @@ def replace( # {'A': NA} -> 0 elif not is_list_like(value): + keys = [(k, src) for k, src in to_replace.items() if k in self] keys_len = len(keys) - 1 for i, (k, src) in enumerate(keys): @@ -6698,6 +6775,7 @@ def replace( elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing'] if is_list_like(value): + if len(to_replace) != len(value): raise ValueError( "Replacement lists must match " @@ -6713,14 +6791,15 @@ def replace( ) else: # [NA, ''] -> 0 + new_data = self._data.replace( to_replace=to_replace, value=value, inplace=inplace, regex=regex ) elif to_replace is None: if not ( - is_re_compilable(regex) - or is_list_like(regex) - or is_dict_like(regex) + is_re_compilable(regex) + or is_list_like(regex) + or is_dict_like(regex) ): raise TypeError( "'regex' must be a string or a compiled " diff --git a/pandas/core/missing.py b/pandas/core/missing.py index bc81fbb7e1ce0..d5c884ba44294 100644 --- a/pandas/core/missing.py +++ 
b/pandas/core/missing.py @@ -26,40 +26,54 @@ def mask_missing(arr, values_to_mask): Return a masking array of same size/shape as arr with entries equaling any member of values_to_mask set to True """ + dtype, values_to_mask = infer_dtype_from_array(values_to_mask) + try: values_to_mask = np.array(values_to_mask, dtype=dtype) except Exception: values_to_mask = np.array(values_to_mask, dtype=object) + na_mask = isna(values_to_mask) + nonna = values_to_mask[~na_mask] + mask = None for x in nonna: if mask is None: + # numpy elementwise comparison warning + if is_numeric_v_string_like(arr, x): + + mask = False else: + mask = arr == x + # if x is a string and arr is not, then we get False and we must # expand the mask to size arr.shape if is_scalar(mask): mask = np.zeros(arr.shape, dtype=bool) else: + # numpy elementwise comparison warning if is_numeric_v_string_like(arr, x): mask |= False else: mask |= arr == x + if na_mask.any(): + if mask is None: mask = isna(arr) else: @@ -67,8 +81,10 @@ def mask_missing(arr, values_to_mask): # GH 21977 if mask is None: + mask = np.zeros(arr.shape, dtype=bool) + return mask @@ -78,6 +94,7 @@ def clean_fill_method(method, allow_nearest=False): return None if isinstance(method, str): + method = method.lower() if method == "ffill": method = "pad" @@ -94,6 +111,7 @@ def clean_fill_method(method, allow_nearest=False): expecting=expecting, method=method ) raise ValueError(msg) + return method @@ -573,6 +591,7 @@ def backfill_2d(values, limit=None, mask=None, dtype=None): def get_fill_func(method): method = clean_fill_method(method) + return _fill_methods[method] diff --git a/pandas/core/series.py b/pandas/core/series.py index 3f04970ee4e58..e4455c5c7d39d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4314,6 +4314,7 @@ def replace( limit=None, regex=False, method="pad", + replace_by_none=False ): return super().replace( to_replace=to_replace, @@ -4322,6 +4323,7 @@ def replace( limit=limit, regex=regex, method=method, + 
replace_by_none=replace_by_none ) @Appender(generic._shared_docs["shift"] % _shared_doc_kwargs) diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 7b9e50ebbf342..4d7b850d8f703 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -519,6 +519,7 @@ def test_validate_bool_args(self): invalid_values = [1, "True", [1, 2, 3], 5.0] for value in invalid_values: + with pytest.raises(ValueError): super(DataFrame, df).rename_axis( mapper={"a": "x", "b": "y"}, axis=1, inplace=value diff --git a/requirements-dev.txt b/requirements-dev.txt index cf11a3ee28258..9a73952cc86e8 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -31,7 +31,6 @@ ipywidgets nbformat notebook>=5.7.5 pip -blosc bottleneck>=1.2.1 ipykernel ipython>=5.6.0 @@ -54,4 +53,4 @@ xarray xlrd xlsxwriter xlwt -pyreadstat \ No newline at end of file +pyreadstat
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27997
2019-08-18T16:39:36Z
2019-08-19T13:16:49Z
null
2019-08-19T14:23:23Z
New branch
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 038447ad252fe..096dff0bce309 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -463,6 +463,7 @@ def pad_inplace(algos_t[:] values, continue fill_count += 1 values[i] = val + else: fill_count = 0 val = values[i] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 603a615c1f8cb..fe2144f7464bb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4186,14 +4186,16 @@ def fillna( @Appender(_shared_docs["replace"] % _shared_doc_kwargs) def replace( - self, - to_replace=None, - value=None, - inplace=False, - limit=None, - regex=False, - method="pad", + self, + to_replace=None, + value=None, + inplace=False, + limit=None, + regex=False, + method="pad", + replace_by_none=False ): + return super().replace( to_replace=to_replace, value=value, @@ -4201,6 +4203,7 @@ def replace( limit=limit, regex=regex, method=method, + replace_by_none=replace_by_none ) @Appender(_shared_docs["shift"] % _shared_doc_kwargs) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ba1c516b9b444..c047fe4e7da75 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -107,14 +107,16 @@ def _single_replace(self, to_replace, method, inplace, limit): result = self if inplace else self.copy() fill_f = missing.get_fill_func(method) + mask = missing.mask_missing(result.values, to_replace) - values = fill_f(result.values, limit=limit, mask=mask) + values = fill_f(result.values, limit=limit, mask=mask) if values.dtype == orig_dtype and inplace: return result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self) + if inplace: self._update_inplace(result._data) return @@ -6380,6 +6382,8 @@ def bfill(self, axis=None, inplace=False, limit=None, downcast=None): .. versionchanged:: 0.23.0 Added to DataFrame. 
+ replace_by_none : bool, default False + If True, replace to_replace by None Returns ------- @@ -6585,36 +6589,42 @@ def bfill(self, axis=None, inplace=False, limit=None, downcast=None): dtype: object """ + @Appender(_shared_docs["replace"] % _shared_doc_kwargs) def replace( - self, - to_replace=None, - value=None, - inplace=False, - limit=None, - regex=False, - method="pad", + self, + to_replace=None, + value=None, + inplace=False, + limit=None, + regex=False, + method="pad", + replace_by_none=False ): + + inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: + print('if not is_bool(regex) and to_replace is not None') raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool") self._consolidate_inplace() - - if value is None: - # passing a single value that is scalar like - # when value is None (GH5319), for compat + if value is None and is_dict_like(to_replace): if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): + if isinstance(self, ABCDataFrame): + return self.apply( _single_replace, args=(to_replace, method, inplace, limit) ) + return _single_replace(self, to_replace, method, inplace, limit) if not is_dict_like(to_replace): + if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None' @@ -6630,6 +6640,7 @@ def replace( are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): + if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values" @@ -6652,11 +6663,78 @@ def replace( to_replace, value = to_rep_dict, value_dict else: + to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) + + + if value is None and not replace_by_none: + + # passing a single value that is scalar like + # when value is None (GH5319), for compat + + if not is_dict_like(to_replace) and not is_dict_like(regex): + + 
to_replace = [to_replace] + + + if isinstance(to_replace, (tuple, list)): + + + if isinstance(self, ABCDataFrame): + return self.apply( + _single_replace, args=(to_replace, method, inplace, limit) + ) + + return _single_replace(self, to_replace, method, inplace, limit) + + if not is_dict_like(to_replace): + + if not is_dict_like(regex): + raise TypeError( + 'If "to_replace" and "value" are both None' + ' and "to_replace" is not a list, then ' + "regex must be a mapping" + ) + to_replace = regex + regex = True + + items = list(to_replace.items()) + keys, values = zip(*items) if items else ([], []) + + are_mappings = [is_dict_like(v) for v in values] + + if any(are_mappings): + if not all(are_mappings): + raise TypeError( + "If a nested mapping is passed, all values" + " of the top level mapping must be " + "mappings" + ) + # passed a nested dict/Series + to_rep_dict = {} + value_dict = {} + + for k, v in items: + keys, values = list(zip(*v.items())) or ([], []) + if set(keys) & set(values): + raise ValueError( + "Replacement not allowed with " + "overlapping keys and values" + ) + to_rep_dict[k] = list(keys) + value_dict[k] = list(values) + + to_replace, value = to_rep_dict, value_dict + else: + to_replace, value = keys, values + + return self.replace( + to_replace, value, inplace=inplace, limit=limit, regex=regex + ) else: # need a non-zero len on all axes @@ -6664,7 +6742,9 @@ def replace( return self new_data = self._data + if is_dict_like(to_replace): + if is_dict_like(value): # {'A' : NA} -> {'A' : 0} res = self if inplace else self.copy() for c, src in to_replace.items(): @@ -6681,6 +6761,7 @@ def replace( # {'A': NA} -> 0 elif not is_list_like(value): + keys = [(k, src) for k, src in to_replace.items() if k in self] keys_len = len(keys) - 1 for i, (k, src) in enumerate(keys): @@ -6698,6 +6779,7 @@ def replace( elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing'] if is_list_like(value): + if len(to_replace) != len(value): raise ValueError( "Replacement 
lists must match " @@ -6713,14 +6795,15 @@ def replace( ) else: # [NA, ''] -> 0 + new_data = self._data.replace( to_replace=to_replace, value=value, inplace=inplace, regex=regex ) elif to_replace is None: if not ( - is_re_compilable(regex) - or is_list_like(regex) - or is_dict_like(regex) + is_re_compilable(regex) + or is_list_like(regex) + or is_dict_like(regex) ): raise TypeError( "'regex' must be a string or a compiled " diff --git a/pandas/core/missing.py b/pandas/core/missing.py index bc81fbb7e1ce0..d5c884ba44294 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -26,40 +26,54 @@ def mask_missing(arr, values_to_mask): Return a masking array of same size/shape as arr with entries equaling any member of values_to_mask set to True """ + dtype, values_to_mask = infer_dtype_from_array(values_to_mask) + try: values_to_mask = np.array(values_to_mask, dtype=dtype) except Exception: values_to_mask = np.array(values_to_mask, dtype=object) + na_mask = isna(values_to_mask) + nonna = values_to_mask[~na_mask] + mask = None for x in nonna: if mask is None: + # numpy elementwise comparison warning + if is_numeric_v_string_like(arr, x): + + mask = False else: + mask = arr == x + # if x is a string and arr is not, then we get False and we must # expand the mask to size arr.shape if is_scalar(mask): mask = np.zeros(arr.shape, dtype=bool) else: + # numpy elementwise comparison warning if is_numeric_v_string_like(arr, x): mask |= False else: mask |= arr == x + if na_mask.any(): + if mask is None: mask = isna(arr) else: @@ -67,8 +81,10 @@ def mask_missing(arr, values_to_mask): # GH 21977 if mask is None: + mask = np.zeros(arr.shape, dtype=bool) + return mask @@ -78,6 +94,7 @@ def clean_fill_method(method, allow_nearest=False): return None if isinstance(method, str): + method = method.lower() if method == "ffill": method = "pad" @@ -94,6 +111,7 @@ def clean_fill_method(method, allow_nearest=False): expecting=expecting, method=method ) raise ValueError(msg) + return 
method @@ -573,6 +591,7 @@ def backfill_2d(values, limit=None, mask=None, dtype=None): def get_fill_func(method): method = clean_fill_method(method) + return _fill_methods[method] diff --git a/pandas/core/series.py b/pandas/core/series.py index 3f04970ee4e58..e4455c5c7d39d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4314,6 +4314,7 @@ def replace( limit=None, regex=False, method="pad", + replace_by_none=False ): return super().replace( to_replace=to_replace, @@ -4322,6 +4323,7 @@ def replace( limit=limit, regex=regex, method=method, + replace_by_none=replace_by_none ) @Appender(generic._shared_docs["shift"] % _shared_doc_kwargs) diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 7b9e50ebbf342..4d7b850d8f703 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -519,6 +519,7 @@ def test_validate_bool_args(self): invalid_values = [1, "True", [1, 2, 3], 5.0] for value in invalid_values: + with pytest.raises(ValueError): super(DataFrame, df).rename_axis( mapper={"a": "x", "b": "y"}, axis=1, inplace=value diff --git a/requirements-dev.txt b/requirements-dev.txt index cf11a3ee28258..9a73952cc86e8 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -31,7 +31,6 @@ ipywidgets nbformat notebook>=5.7.5 pip -blosc bottleneck>=1.2.1 ipykernel ipython>=5.6.0 @@ -54,4 +53,4 @@ xarray xlrd xlsxwriter xlwt -pyreadstat \ No newline at end of file +pyreadstat
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27996
2019-08-18T16:30:43Z
2019-08-18T16:32:32Z
null
2019-08-18T16:32:32Z
PLT: plot('line') or plot('area') produces wrong xlim in xaxis in 0.25.0
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 0be4ebc627b30..c5bc865e59fa5 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -168,6 +168,7 @@ Plotting - - Bug in :meth:`DataFrame.plot` producing incorrect legend markers when plotting multiple series on the same axis (:issue:`18222`) - Bug in :meth:`DataFrame.plot` when ``kind='box'`` and data contains datetime or timedelta data. These types are now automatically dropped (:issue:`22799`) +- Bug in :meth:`DataFrame.plot.line` and :meth:`DataFrame.plot.area` produce wrong xlim in x-axis (:issue:`27686`, :issue:`25160`, :issue:`24784`) Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 287cc2f4130f4..fbca57206e163 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -33,8 +33,6 @@ from pandas.plotting._matplotlib.style import _get_standard_colors from pandas.plotting._matplotlib.tools import ( _flatten, - _get_all_lines, - _get_xlim, _handle_shared_axes, _subplots, format_date_labels, @@ -1101,9 +1099,8 @@ def _make_plot(self): ) self._add_legend_handle(newlines[0], label, index=i) - lines = _get_all_lines(ax) - left, right = _get_xlim(lines) - ax.set_xlim(left, right) + # GH27686 set_xlim will truncate xaxis to fixed space + ax.relim() @classmethod def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds): diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 8472eb3a3d887..fd2913ca51ac3 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -343,27 +343,6 @@ def _flatten(axes): return np.array(axes) -def _get_all_lines(ax): - lines = ax.get_lines() - - if hasattr(ax, "right_ax"): - lines += ax.right_ax.get_lines() - - if hasattr(ax, "left_ax"): - lines += ax.left_ax.get_lines() - - return lines - - -def 
_get_xlim(lines): - left, right = np.inf, -np.inf - for l in lines: - x = l.get_xdata(orig=False) - left = min(np.nanmin(x), left) - right = max(np.nanmax(x), right) - return left, right - - def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None): import matplotlib.pyplot as plt diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 69070ea11e478..be87929b4545a 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -419,6 +419,8 @@ def test_get_finder(self): assert conv.get_finder("A") == conv._annual_finder assert conv.get_finder("W") == conv._daily_finder + # TODO: The finder should be retested due to wrong xlim values on x-axis + @pytest.mark.xfail(reason="TODO: check details in GH28021") @pytest.mark.slow def test_finder_daily(self): day_lst = [10, 40, 252, 400, 950, 2750, 10000] @@ -442,6 +444,8 @@ def test_finder_daily(self): assert rs1 == xpl1 assert rs2 == xpl2 + # TODO: The finder should be retested due to wrong xlim values on x-axis + @pytest.mark.xfail(reason="TODO: check details in GH28021") @pytest.mark.slow def test_finder_quarterly(self): yrs = [3.5, 11] @@ -465,6 +469,8 @@ def test_finder_quarterly(self): assert rs1 == xpl1 assert rs2 == xpl2 + # TODO: The finder should be retested due to wrong xlim values on x-axis + @pytest.mark.xfail(reason="TODO: check details in GH28021") @pytest.mark.slow def test_finder_monthly(self): yrs = [1.15, 2.5, 4, 11] @@ -498,6 +504,8 @@ def test_finder_monthly_long(self): xp = Period("1989Q1", "M").ordinal assert rs == xp + # TODO: The finder should be retested due to wrong xlim values on x-axis + @pytest.mark.xfail(reason="TODO: check details in GH28021") @pytest.mark.slow def test_finder_annual(self): xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170] @@ -522,7 +530,7 @@ def test_finder_minutely(self): _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() - rs = 
xaxis.get_majorticklocs()[0] + rs = xaxis.get_majorticklocs()[1] xp = Period("1/1/1999", freq="Min").ordinal assert rs == xp @@ -534,7 +542,7 @@ def test_finder_hourly(self): _, ax = self.plt.subplots() ser.plot(ax=ax) xaxis = ax.get_xaxis() - rs = xaxis.get_majorticklocs()[0] + rs = xaxis.get_majorticklocs()[1] xp = Period("1/1/1999", freq="H").ordinal assert rs == xp @@ -1410,7 +1418,9 @@ def test_plot_outofbounds_datetime(self): def test_format_timedelta_ticks_narrow(self): - expected_labels = ["00:00:00.0000000{:0>2d}".format(i) for i in range(10)] + expected_labels = [ + "00:00:00.0000000{:0>2d}".format(i) for i in np.arange(0, 10, 2) + ] rng = timedelta_range("0", periods=10, freq="ns") df = DataFrame(np.random.randn(len(rng), 3), rng) @@ -1420,8 +1430,8 @@ def test_format_timedelta_ticks_narrow(self): labels = ax.get_xticklabels() result_labels = [x.get_text() for x in labels] - assert len(result_labels) == len(expected_labels) - assert result_labels == expected_labels + assert (len(result_labels) - 2) == len(expected_labels) + assert result_labels[1:-1] == expected_labels def test_format_timedelta_ticks_wide(self): expected_labels = [ @@ -1444,8 +1454,8 @@ def test_format_timedelta_ticks_wide(self): labels = ax.get_xticklabels() result_labels = [x.get_text() for x in labels] - assert len(result_labels) == len(expected_labels) - assert result_labels == expected_labels + assert (len(result_labels) - 2) == len(expected_labels) + assert result_labels[1:-1] == expected_labels def test_timedelta_plot(self): # test issue #8711 diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 7fdc0252b71e3..f672cd3a6aa58 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -3177,6 +3177,58 @@ def test_x_multiindex_values_ticks(self): assert labels_position["(2013, 1)"] == 2.0 assert labels_position["(2013, 2)"] == 3.0 + @pytest.mark.parametrize("kind", ["line", "area"]) + def 
test_xlim_plot_line(self, kind): + # test if xlim is set correctly in plot.line and plot.area + # GH 27686 + df = pd.DataFrame([2, 4], index=[1, 2]) + ax = df.plot(kind=kind) + xlims = ax.get_xlim() + assert xlims[0] < 1 + assert xlims[1] > 2 + + def test_xlim_plot_line_correctly_in_mixed_plot_type(self): + # test if xlim is set correctly when ax contains multiple different kinds + # of plots, GH 27686 + fig, ax = self.plt.subplots() + + indexes = ["k1", "k2", "k3", "k4"] + df = pd.DataFrame( + { + "s1": [1000, 2000, 1500, 2000], + "s2": [900, 1400, 2000, 3000], + "s3": [1500, 1500, 1600, 1200], + "secondary_y": [1, 3, 4, 3], + }, + index=indexes, + ) + df[["s1", "s2", "s3"]].plot.bar(ax=ax, stacked=False) + df[["secondary_y"]].plot(ax=ax, secondary_y=True) + + xlims = ax.get_xlim() + assert xlims[0] < 0 + assert xlims[1] > 3 + + # make sure axis labels are plotted correctly as well + xticklabels = [t.get_text() for t in ax.get_xticklabels()] + assert xticklabels == indexes + + def test_subplots_sharex_false(self): + # test when sharex is set to False, two plots should have different + # labels, GH 25160 + df = pd.DataFrame(np.random.rand(10, 2)) + df.iloc[5:, 1] = np.nan + df.iloc[:5, 0] = np.nan + + figs, axs = self.plt.subplots(2, 1) + df.plot.line(ax=axs, subplots=True, sharex=False) + + expected_ax1 = np.arange(4.5, 10, 0.5) + expected_ax2 = np.arange(-0.5, 5, 0.5) + + tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1) + tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2) + def _generate_4_axes_via_gridspec(): import matplotlib.pyplot as plt diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 111c3a70fc09c..2c4c8aa7461a3 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -897,3 +897,15 @@ def test_plot_accessor_updates_on_inplace(self): _, ax = self.plt.subplots() after = ax.xaxis.get_ticklocs() tm.assert_numpy_array_equal(before, after) + + 
@pytest.mark.parametrize("kind", ["line", "area"]) + def test_plot_xlim_for_series(self, kind): + # test if xlim is also correctly plotted in Series for line and area + # GH 27686 + s = Series([2, 3]) + _, ax = self.plt.subplots() + s.plot(kind=kind, ax=ax) + xlims = ax.get_xlim() + + assert xlims[0] < 0 + assert xlims[1] > 1
Due to previous PRs, there is an issue with xlim wrongly plotted for lines. I digged into database a bit, and found it also affects `plot(kind='area')` because they share the same `LinePlot`. And this issue is produced in both DataFrame and Series. Now looks like the issue is gone: ![Screen Shot 2019-08-18 at 2 18 32 PM](https://user-images.githubusercontent.com/9269816/63224355-2086cf00-c1c3-11e9-8657-5c30817de9c8.png) Duplicated closed issue 27796 is solved as well: ![Screen Shot 2019-08-18 at 2 18 44 PM](https://user-images.githubusercontent.com/9269816/63224357-24b2ec80-c1c3-11e9-8c95-3ec6befa4e8f.png) Also looks like issue in #25160 is gone as well. ![Screen Shot 2019-08-18 at 2 41 22 PM](https://user-images.githubusercontent.com/9269816/63224575-62fddb00-c1c6-11e9-8714-b386d1d43501.png) The same to #24784 looks like issue is also gone ![Screen Shot 2019-08-18 at 8 03 56 PM](https://user-images.githubusercontent.com/9269816/63228453-678cb880-c1f3-11e9-9a61-a07731889b61.png) - [x] closes #27686 , #25160, #24784 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27993
2019-08-18T11:41:09Z
2019-08-20T14:18:56Z
2019-08-20T14:18:55Z
2019-08-20T14:18:59Z
BUG/TST: fix and test for timezone drop in GroupBy.shift/bfill/ffill
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst index 1cdf213d81a74..69f324211e5b2 100644 --- a/doc/source/whatsnew/v0.25.2.rst +++ b/doc/source/whatsnew/v0.25.2.rst @@ -78,6 +78,7 @@ Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug incorrectly raising an ``IndexError`` when passing a list of quantiles to :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` (:issue:`28113`). +- Bug in :meth:`pandas.core.groupby.GroupBy.shift`, :meth:`pandas.core.groupby.GroupBy.bfill` and :meth:`pandas.core.groupby.GroupBy.ffill` where timezone information would be dropped (:issue:`19995`, :issue:`27992`) - - - diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 55def024cb1d4..e010e615e176e 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2263,26 +2263,28 @@ def _get_cythonized_result( base_func = getattr(libgroupby, how) for name, obj in self._iterate_slices(): + values = obj._data._values + if aggregate: result_sz = ngroups else: - result_sz = len(obj.values) + result_sz = len(values) if not cython_dtype: - cython_dtype = obj.values.dtype + cython_dtype = values.dtype result = np.zeros(result_sz, dtype=cython_dtype) func = partial(base_func, result, labels) inferences = None if needs_values: - vals = obj.values + vals = values if pre_processing: vals, inferences = pre_processing(vals) func = partial(func, vals) if needs_mask: - mask = isna(obj.values).view(np.uint8) + mask = isna(values).view(np.uint8) func = partial(func, mask) if needs_ngroups: @@ -2291,7 +2293,7 @@ def _get_cythonized_result( func(**kwargs) # Call func to modify indexer values in place if result_is_index: - result = algorithms.take_nd(obj.values, result) + result = algorithms.take_nd(values, result) if post_processing: result = post_processing(result, inferences) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 4556b22b57279..bec5cbc5fecb8 100644 --- 
a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1882,3 +1882,69 @@ def test_groupby_axis_1(group_name): results = df.groupby(group_name, axis=1).sum() expected = df.T.groupby(group_name).sum().T assert_frame_equal(results, expected) + + +@pytest.mark.parametrize( + "op, expected", + [ + ( + "shift", + { + "time": [ + None, + None, + Timestamp("2019-01-01 12:00:00"), + Timestamp("2019-01-01 12:30:00"), + None, + None, + ] + }, + ), + ( + "bfill", + { + "time": [ + Timestamp("2019-01-01 12:00:00"), + Timestamp("2019-01-01 12:30:00"), + Timestamp("2019-01-01 14:00:00"), + Timestamp("2019-01-01 14:30:00"), + Timestamp("2019-01-01 14:00:00"), + Timestamp("2019-01-01 14:30:00"), + ] + }, + ), + ( + "ffill", + { + "time": [ + Timestamp("2019-01-01 12:00:00"), + Timestamp("2019-01-01 12:30:00"), + Timestamp("2019-01-01 12:00:00"), + Timestamp("2019-01-01 12:30:00"), + Timestamp("2019-01-01 14:00:00"), + Timestamp("2019-01-01 14:30:00"), + ] + }, + ), + ], +) +def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected): + # GH19995, GH27992: Check that timezone does not drop in shift, bfill, and ffill + tz = tz_naive_fixture + data = { + "id": ["A", "B", "A", "B", "A", "B"], + "time": [ + Timestamp("2019-01-01 12:00:00"), + Timestamp("2019-01-01 12:30:00"), + None, + None, + Timestamp("2019-01-01 14:00:00"), + Timestamp("2019-01-01 14:30:00"), + ], + } + df = DataFrame(data).assign(time=lambda x: x.time.dt.tz_localize(tz)) + + grouped = df.groupby("id") + result = getattr(grouped, op)() + expected = DataFrame(expected).assign(time=lambda x: x.time.dt.tz_localize(tz)) + assert_frame_equal(result, expected)
closes #19995 Timezone info is dropped in GroupBy.shift, bfill, and ffill since index calculated by Cythonized functions are applied to NumPy representation of values without timezone. Could you please find and review this fix? Many thanks, -noritada ``` In [1]: import pandas as pd ...: pd.__version__ Out[1]: '0.24.2' In [2]: df = pd.DataFrame([ ...: ['a', pd.Timestamp('2019-01-01 00:00:00+09:00')], ...: ['b', pd.Timestamp('2019-01-01 00:05:00+09:00')], ...: ['a', None], ...: ['b', None], ...: ['a', pd.Timestamp('2019-01-01 00:20:00+09:00')], ...: ['b', pd.Timestamp('2019-01-01 00:25:00+09:00')], ...: ], columns=['id', 'time']) In [3]: df['time'].shift(1) Out[3]: 0 NaT 1 2019-01-01 00:00:00+09:00 2 2019-01-01 00:05:00+09:00 3 NaT 4 NaT 5 2019-01-01 00:20:00+09:00 Name: time, dtype: datetime64[ns, pytz.FixedOffset(540)] In [4]: df.groupby('id')['time'].shift(1) Out[4]: 0 NaT 1 NaT 2 2018-12-31 15:00:00 3 2018-12-31 15:05:00 4 NaT 5 NaT Name: time, dtype: datetime64[ns] In [5]: df.groupby('id')['time'].bfill() Out[5]: 0 2018-12-31 15:00:00 1 2018-12-31 15:05:00 2 2018-12-31 15:20:00 3 2018-12-31 15:25:00 4 2018-12-31 15:20:00 5 2018-12-31 15:25:00 Name: time, dtype: datetime64[ns] In [6]: df.groupby('id')['time'].ffill() Out[6]: 0 2018-12-31 15:00:00 1 2018-12-31 15:05:00 2 2018-12-31 15:00:00 3 2018-12-31 15:05:00 4 2018-12-31 15:20:00 5 2018-12-31 15:25:00 Name: time, dtype: datetime64[ns] ``` - [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27992
2019-08-18T11:25:59Z
2019-09-09T12:06:01Z
2019-09-09T12:06:01Z
2019-09-09T15:24:05Z
DataFrame html repr: also follow min_rows setting
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 34b149a6b8261..3116d4be30cbf 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -105,6 +105,7 @@ I/O - Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`) - Better error message when a negative header is passed in :func:`pandas.read_csv` (:issue:`27779`) +- Follow the ``min_rows`` display option (introduced in v0.25.0) correctly in the html repr in the notebook (:issue:`27991`). - Plotting diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 603a615c1f8cb..b377e571a5abc 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -669,15 +669,18 @@ def _repr_html_(self): if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") + min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") - return self.to_html( + formatter = fmt.DataFrameFormatter( + self, max_rows=max_rows, + min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, - notebook=True, ) + return formatter.to_html(notebook=True) else: return None diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index a048e3bb867bd..c0451a0672c89 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -471,28 +471,35 @@ def test_repr_min_rows(self): # default setting no truncation even if above min_rows assert ".." not in repr(df) + assert ".." not in df._repr_html_() df = pd.DataFrame({"a": range(61)}) # default of max_rows 60 triggers truncation if above assert ".." in repr(df) + assert ".." in df._repr_html_() with option_context("display.max_rows", 10, "display.min_rows", 4): # truncated after first two rows assert ".." in repr(df) assert "2 " not in repr(df) + assert "..." 
in df._repr_html_() + assert "<td>2</td>" not in df._repr_html_() with option_context("display.max_rows", 12, "display.min_rows", None): # when set to None, follow value of max_rows assert "5 5" in repr(df) + assert "<td>5</td>" in df._repr_html_() with option_context("display.max_rows", 10, "display.min_rows", 12): # when set value higher as max_rows, use the minimum assert "5 5" not in repr(df) + assert "<td>5</td>" not in df._repr_html_() with option_context("display.max_rows", None, "display.min_rows", 12): # max_rows of None -> never truncate assert ".." not in repr(df) + assert ".." not in df._repr_html_() def test_str_max_colwidth(self): # GH 7856
Follow-up on https://github.com/pandas-dev/pandas/pull/27095, where I forgot to apply this setting in the html repr as well. Thoughts on including this in 0.25.1 or not? It's kind of a oversight of the 0.25 feature, but also an actual change of course in the user experience in the notebook.
https://api.github.com/repos/pandas-dev/pandas/pulls/27991
2019-08-18T09:20:31Z
2019-08-20T19:24:47Z
2019-08-20T19:24:47Z
2019-08-20T19:24:49Z
DOC: remove savefig references from the docs v0.7.3
diff --git a/doc/source/whatsnew/v0.7.3.rst b/doc/source/whatsnew/v0.7.3.rst index a8697f60d7467..020cf3bdc2d59 100644 --- a/doc/source/whatsnew/v0.7.3.rst +++ b/doc/source/whatsnew/v0.7.3.rst @@ -25,8 +25,6 @@ New features from pandas.tools.plotting import scatter_matrix scatter_matrix(df, alpha=0.2) # noqa F821 -.. image:: ../savefig/scatter_matrix_kde.png - :width: 5in - Add ``stacked`` argument to Series and DataFrame's ``plot`` method for :ref:`stacked bar plots <visualization.barplot>`. @@ -35,15 +33,11 @@ New features df.plot(kind='bar', stacked=True) # noqa F821 -.. image:: ../savefig/bar_plot_stacked_ex.png - :width: 4in .. code-block:: python df.plot(kind='barh', stacked=True) # noqa F821 -.. image:: ../savefig/barh_plot_stacked_ex.png - :width: 4in - Add log x and y :ref:`scaling options <visualization.basic>` to ``DataFrame.plot`` and ``Series.plot``
- [x] closes #27971 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27990
2019-08-18T06:04:01Z
2019-08-18T08:23:48Z
2019-08-18T08:23:48Z
2019-08-18T08:23:49Z
BUG: Exception when calling pandas.isnull() with a pandas type object
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 34b149a6b8261..58eb83b7d57ae 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -89,7 +89,7 @@ Indexing Missing ^^^^^^^ -- +- Fixed bug where object with no data attribute were not being caught (:issue:`27482`) - - diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 6f599a6be6021..14ef46cf44cc6 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -128,6 +128,8 @@ def isna(obj): def _isna_new(obj): + if isinstance(obj,type): + return False if is_scalar(obj): return libmissing.checknull(obj) # hack (for now) because MI registers as ndarray diff --git a/scripts/tests/test_27482.py b/scripts/tests/test_27482.py new file mode 100644 index 0000000000000..e3b637728505b --- /dev/null +++ b/scripts/tests/test_27482.py @@ -0,0 +1,19 @@ +import pandas as pd +import pytest +import unittest + +class TestStringMethods(unittest.TestCase): + + def test1(self): + x = pd.Series([1,2,3,4]) + xt = type(x) + assert pd.isnull(xt)==False,"Passed" + + def test2(self): + y = pd.DataFrame({"col": [1,2,3,4]}) + yt = type(y) + assert pd.isnull(yt)==False,"Passed" + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file
- [x] closes #27482 - [x] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27989
2019-08-18T05:20:34Z
2019-08-20T14:10:17Z
null
2019-08-21T06:01:05Z
DOC: Fix GL01 and GL02 errors in the docstrings
diff --git a/pandas/conftest.py b/pandas/conftest.py index 2cf7bf6a6df41..b032e14d8f7e1 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -123,18 +123,22 @@ def ip(): @pytest.fixture(params=[True, False, None]) def observed(request): - """ pass in the observed keyword to groupby for [True, False] + """ + Pass in the observed keyword to groupby for [True, False] This indicates whether categoricals should return values for values which are not in the grouper [False / None], or only values which appear in the grouper [True]. [None] is supported for future compatibility if we decide to change the default (and would need to warn if this - parameter is not passed)""" + parameter is not passed). + """ return request.param @pytest.fixture(params=[True, False, None]) def ordered_fixture(request): - """Boolean 'ordered' parameter for Categorical.""" + """ + Boolean 'ordered' parameter for Categorical. + """ return request.param @@ -234,7 +238,8 @@ def cython_table_items(request): def _get_cython_table_params(ndframe, func_names_and_expected): - """combine frame, functions from SelectionMixin._cython_table + """ + Combine frame, functions from SelectionMixin._cython_table keys and expected result. Parameters @@ -242,7 +247,7 @@ def _get_cython_table_params(ndframe, func_names_and_expected): ndframe : DataFrame or Series func_names_and_expected : Sequence of two items The first item is a name of a NDFrame method ('sum', 'prod') etc. - The second item is the expected return value + The second item is the expected return value. Returns ------- @@ -341,7 +346,8 @@ def strict_data_files(pytestconfig): @pytest.fixture def datapath(strict_data_files): - """Get the path to a data file. + """ + Get the path to a data file. Parameters ---------- @@ -375,7 +381,9 @@ def deco(*args): @pytest.fixture def iris(datapath): - """The iris dataset as a DataFrame.""" + """ + The iris dataset as a DataFrame. 
+ """ return pd.read_csv(datapath("data", "iris.csv")) @@ -504,7 +512,8 @@ def tz_aware_fixture(request): @pytest.fixture(params=STRING_DTYPES) def string_dtype(request): - """Parametrized fixture for string dtypes. + """ + Parametrized fixture for string dtypes. * str * 'str' @@ -515,7 +524,8 @@ def string_dtype(request): @pytest.fixture(params=BYTES_DTYPES) def bytes_dtype(request): - """Parametrized fixture for bytes dtypes. + """ + Parametrized fixture for bytes dtypes. * bytes * 'bytes' @@ -525,7 +535,8 @@ def bytes_dtype(request): @pytest.fixture(params=OBJECT_DTYPES) def object_dtype(request): - """Parametrized fixture for object dtypes. + """ + Parametrized fixture for object dtypes. * object * 'object' @@ -535,7 +546,8 @@ def object_dtype(request): @pytest.fixture(params=DATETIME64_DTYPES) def datetime64_dtype(request): - """Parametrized fixture for datetime64 dtypes. + """ + Parametrized fixture for datetime64 dtypes. * 'datetime64[ns]' * 'M8[ns]' @@ -545,7 +557,8 @@ def datetime64_dtype(request): @pytest.fixture(params=TIMEDELTA64_DTYPES) def timedelta64_dtype(request): - """Parametrized fixture for timedelta64 dtypes. + """ + Parametrized fixture for timedelta64 dtypes. * 'timedelta64[ns]' * 'm8[ns]' diff --git a/pandas/io/html.py b/pandas/io/html.py index 9d2647f226f00..490c574463b9b 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -1,4 +1,5 @@ -""":mod:`pandas.io.html` is a module containing functionality for dealing with +""" +:mod:`pandas.io.html` is a module containing functionality for dealing with HTML IO. """ @@ -58,7 +59,8 @@ def _importers(): def _remove_whitespace(s, regex=_RE_WHITESPACE): - """Replace extra whitespace inside of a string with a single space. + """ + Replace extra whitespace inside of a string with a single space. Parameters ---------- @@ -77,7 +79,8 @@ def _remove_whitespace(s, regex=_RE_WHITESPACE): def _get_skiprows(skiprows): - """Get an iterator given an integer, slice or container. 
+ """ + Get an iterator given an integer, slice or container. Parameters ---------- @@ -107,7 +110,8 @@ def _get_skiprows(skiprows): def _read(obj): - """Try to read from a url, file or string. + """ + Try to read from a url, file or string. Parameters ---------- @@ -136,7 +140,8 @@ def _read(obj): class _HtmlFrameParser: - """Base class for parsers that parse HTML into DataFrames. + """ + Base class for parsers that parse HTML into DataFrames. Parameters ---------- @@ -515,7 +520,8 @@ def _handle_hidden_tables(self, tbl_list, attr_name): class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser): - """HTML to DataFrame parser that uses BeautifulSoup under the hood. + """ + HTML to DataFrame parser that uses BeautifulSoup under the hood. See Also -------- @@ -622,7 +628,8 @@ def _build_xpath_expr(attrs): class _LxmlFrameParser(_HtmlFrameParser): - """HTML to DataFrame parser that uses lxml under the hood. + """ + HTML to DataFrame parser that uses lxml under the hood. Warning ------- @@ -937,7 +944,8 @@ def read_html( keep_default_na=True, displayed_only=True, ): - r"""Read HTML tables into a ``list`` of ``DataFrame`` objects. + r""" + Read HTML tables into a ``list`` of ``DataFrame`` objects. Parameters ----------
- [x] closes #27986 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Fixed docstrings text that aren’t on the next line after opening `"""` and followed immediately by the line with closing `"""`. Issue #27986
https://api.github.com/repos/pandas-dev/pandas/pulls/27988
2019-08-18T04:54:18Z
2019-08-24T09:32:55Z
2019-08-24T09:32:55Z
2019-08-24T09:34:15Z
index fixed when join with empty dataframe
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 603a615c1f8cb..66324aff60270 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9,6 +9,7 @@ labeling information """ import collections +import pandas from collections import OrderedDict, abc import functools from io import StringIO @@ -4382,7 +4383,7 @@ def set_index( len_self=len(self), len_col=len(arrays[-1]) ) ) - + index = ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: @@ -7187,24 +7188,33 @@ def _join_compat( other = DataFrame({other.name: other}) if isinstance(other, DataFrame): - return merge( - self, - other, - left_on=on, - how=how, - left_index=on is None, - right_index=True, - suffixes=(lsuffix, rsuffix), - sort=sort, - ) - else: - if on is not None: - raise ValueError( - "Joining multiple DataFrames only supported for joining on index" + if on is None: + return merge( + self, + other, + left_on=on, + how=how, + left_index=True, + right_index=True, + suffixes=(lsuffix, rsuffix), + sort=sort ) - + else: + result = merge( + self, + other, + left_on=on, + how=how, + left_index=False, + right_index=True, + suffixes=(lsuffix, rsuffix), + sort=sort + ) + #Range123 + result.set_index([pandas.Series([i for i in range(len(result))])], inplace=True) + + return result frames = [self] + list(other) - can_concat = all(df.index.is_unique for df in frames) # join indexes only using concat
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27987
2019-08-18T04:50:17Z
2019-08-18T04:52:25Z
null
2019-08-18T04:52:25Z
Pandas tseries offsets DateOffset
diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py index fc07f2a484102..203994b500f2c 100644 --- a/asv_bench/benchmarks/io/json.py +++ b/asv_bench/benchmarks/io/json.py @@ -4,7 +4,6 @@ from ..pandas_vb_common import BaseIO - class ReadJSON(BaseIO): fname = "__test__.json" diff --git a/pandas/tests/io/parser/test_read_csv.py b/pandas/tests/io/parser/test_read_csv.py new file mode 100644 index 0000000000000..cef1fda37d38a --- /dev/null +++ b/pandas/tests/io/parser/test_read_csv.py @@ -0,0 +1,154 @@ +import pytest +import csv +import os +import sys + +rootpath = os.path.dirname(os.path.abspath(__file__)) + +sys.path.append(os.path.join(rootpath, "pandas/io")) +from pandas.io import parsers + +TMP_PATH = "tmp" + +""" +To run test, run 'python path/to/test_parsers.py' + + test_read_csv_without_encoding_kwarg returns result of read_csv method. + - if exception is raised from method, the result returned is the exception. +""" + + +def test_read_csv_without_encoding_kwarg(file): + try: + result = parsers.read_csv(file) + except Exception as e: + result = e + return result + + +def write_csv_file(filename, data, encoding, delimiter=",", newline=""): + with open(filename, "w", newline=newline, encoding=encoding) as csv_file: + writer = csv.writer(csv_file, delimiter=delimiter) + # for row in data: + writer.writerow(data) + return filename + + +def test(): + test_results = {} + test_dtypes = [ + "ascii", + "big5", + "big5hkscs", + "cp037", + "cp273", + "cp424", + "cp437", + "cp500", + "cp720", + "cp737", + "cp775", + "cp850", + "cp852", + "cp855", + "cp856", + "cp857", + "cp858", + "cp860", + "cp861", + "cp862", + "cp863", + "cp864", + "cp865", + "cp866", + "cp869", + "cp874", + "cp875", + "cp932", + "cp949", + "cp950", + "cp1006", + "cp1026", + "cp1125", + "cp1140", + "cp1250", + "cp1251", + "cp1252", + "cp1253", + "cp1254", + "cp1255", + "cp1256", + "cp1257", + "cp1258", + "cp65001", + "euc_jp", + "euc_jis_2004", + "euc_jisx0213", + "euc_kr", + 
"gb2312", + "gbk", + "gb18030", + "hz", + "iso2022_jp", + "iso2022_jp_1", + "iso2022_jp_2", + "iso2022_jp_2004", + "iso2022_jp_3", + "iso2022_jp_ext", + "iso2022_kr", + "latin_1", + "iso8859_2", + "iso8859_3", + "iso8859_4", + "iso8859_5", + "iso8859_6", + "iso8859_7", + "iso8859_8", + "iso8859_9", + "iso8859_10", + "iso8859_11", + "iso8859_13", + "iso8859_14", + "iso8859_15", + "iso8859_16", + "johab", + "koi8_r", + "koi8_t", + "koi8_u", + "kz1048", + "mac_cyrillic", + "mac_greek", + "mac_iceland", + "mac_latin2", + "mac_roman", + "mac_turkish", + "ptcp154", + "shift_jis", + "shift_jis_2004", + "shift_jisx0213", + "utf_32", + "utf_32_be", + "utf_32_le", + "utf_16", + "utf_16_be", + "utf_16_le", + "utf_7", + "utf_8", + "utf_8_sig", + ] + + data = """ + one,two,three + 1,2,3 + uno,dos,tres + """ + for i, dtype in enumerate(test_dtypes): + file = write_csv_file(f"test{i}.csv", data, dtype) + result = test_read_csv_without_encoding_kwarg(file) + test_results[dtype] = result + + print("test results: ", test_results) + + +if __name__ == "__main__": + test()
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry input: python .\scripts\validate_docstrings.py pandas.tseries.offsets.DateOffset --errors=F821 output: Docstring for "pandas.tseries.offsets.DateOffset" correct. :)
https://api.github.com/repos/pandas-dev/pandas/pulls/27985
2019-08-18T01:55:05Z
2019-08-20T14:07:08Z
null
2019-08-20T14:07:09Z
Support str param type for partition_cols in to_parquet function
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 0be4ebc627b30..e504a29748b38 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -29,7 +29,7 @@ Enhancements Other enhancements ^^^^^^^^^^^^^^^^^^ -- +- String support for paramater partition_cols in the :func:`pandas.to_parquet` (:issue:`27117`) - .. _whatsnew_1000.api_breaking: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 603a615c1f8cb..ca63e7452b873 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2156,9 +2156,10 @@ def to_parquet( .. versionadded:: 0.24.0 - partition_cols : list, optional, default None - Column names by which to partition the dataset - Columns are partitioned in the order they are given + partition_cols : list or string, optional, default None + Column names by which to partition the dataset. + Columns are partitioned in the order they are given. + String identifies a single column to be partitioned. .. versionadded:: 0.24.0 @@ -2166,6 +2167,11 @@ def to_parquet( Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. + .. versionchanged:: 1.0.0 + + partition_cols + Added ability to pass in a string for a single column name + See Also -------- read_parquet : Read a parquet file. diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 6fc70e9f4a737..acf97e4b7a161 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -235,15 +235,23 @@ def to_parquet( .. versionadded:: 0.24.0 - partition_cols : list, optional, default None - Column names by which to partition the dataset - Columns are partitioned in the order they are given + partition_cols : list or string, optional, default None + Column names by which to partition the dataset. + Columns are partitioned in the order they are given. + String identifies a single column to be partitioned. .. versionadded:: 0.24.0 kwargs Additional keyword arguments passed to the engine + + .. 
versionchanged:: 1.0.0 + + partition_cols + Added ability to pass in a string for a single column name """ + if isinstance(partition_cols, str): + partition_cols = [partition_cols] impl = get_engine(engine) return impl.write( df, diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index d634859e72d7b..0b2d3a07980fa 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -473,6 +473,19 @@ def test_partition_cols_supported(self, pa, df_full): assert len(dataset.partitions.partition_names) == 2 assert dataset.partitions.partition_names == set(partition_cols) + def test_partition_cols_string(self, pa, df_full): + # GH #23283 + partition_cols = "bool" + partition_cols_list = [partition_cols] + df = df_full + with tm.ensure_clean_dir() as path: + df.to_parquet(path, partition_cols=partition_cols, compression=None) + import pyarrow.parquet as pq + + dataset = pq.ParquetDataset(path, validate_schema=False) + assert len(dataset.partitions.partition_names) == 1 + assert dataset.partitions.partition_names == set(partition_cols_list) + def test_empty_dataframe(self, pa): # GH #27339 df = pd.DataFrame() @@ -543,6 +556,23 @@ def test_partition_cols_supported(self, fp, df_full): actual_partition_cols = fastparquet.ParquetFile(path, False).cats assert len(actual_partition_cols) == 2 + def test_partition_cols_string(self, fp, df_full): + # GH #23283 + partition_cols = "bool" + df = df_full + with tm.ensure_clean_dir() as path: + df.to_parquet( + path, + engine="fastparquet", + partition_cols=partition_cols, + compression=None, + ) + assert os.path.exists(path) + import fastparquet # noqa: F811 + + actual_partition_cols = fastparquet.ParquetFile(path, False).cats + assert len(actual_partition_cols) == 1 + def test_partition_on_supported(self, fp, df_full): # GH #23283 partition_cols = ["bool", "int"]
- [x] closes #27117 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Not sure where to add whatsnew entry #PandasHack2019
https://api.github.com/repos/pandas-dev/pandas/pulls/27984
2019-08-18T01:46:08Z
2019-11-07T21:09:03Z
null
2019-11-07T21:09:04Z
Issue 27117
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 603a615c1f8cb..627acea4951f9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2156,9 +2156,10 @@ def to_parquet( .. versionadded:: 0.24.0 - partition_cols : list, optional, default None - Column names by which to partition the dataset - Columns are partitioned in the order they are given + partition_cols : list or string, optional, default None + Column names by which to partition the dataset. + Columns are partitioned in the order they are given. + String identifies a single column to be partitioned. .. versionadded:: 0.24.0 diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 6fc70e9f4a737..c01163cfd237c 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -235,15 +235,18 @@ def to_parquet( .. versionadded:: 0.24.0 - partition_cols : list, optional, default None - Column names by which to partition the dataset - Columns are partitioned in the order they are given + partition_cols : list or string, optional, default None + Column names by which to partition the dataset. + Columns are partitioned in the order they are given. + String identifies a single column to be partitioned. .. 
versionadded:: 0.24.0 kwargs Additional keyword arguments passed to the engine """ + if isinstance(partition_cols, str): + partition_cols = [partition_cols] impl = get_engine(engine) return impl.write( df, diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index d634859e72d7b..f58ae25003b99 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -330,7 +330,7 @@ def test_write_index(self, engine): # non-default index for index in indexes: df.index = index - check_round_trip(df, engine, check_names=check_names) + check_round_trip(df, engine, check_names=chetest_partition_cols_supportedck_names) # index with meta-data df.index = [0, 1, 2] @@ -416,7 +416,7 @@ def test_basic_subset_columns(self, pa, df_full): # GH18628 df = df_full - # additional supported types for pyarrow + # additional supported types for pyarrowtest_partition_cols_supported df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels") check_round_trip( @@ -473,6 +473,18 @@ def test_partition_cols_supported(self, pa, df_full): assert len(dataset.partitions.partition_names) == 2 assert dataset.partitions.partition_names == set(partition_cols) + def test_partition_cols_string(self, pa, df_full): + # GH #23283 + partition_cols = 'bool' + df = df_full + with tm.ensure_clean_dir() as path: + df.to_parquet(path, partition_cols=partition_cols, compression=None) + import pyarrow.parquet as pq + + dataset = pq.ParquetDataset(path, validate_schema=False) + assert len(dataset.partitions.partition_names) == 1 + assert dataset.partitions.partition_names == set([partition_cols]) + def test_empty_dataframe(self, pa): # GH #27339 df = pd.DataFrame() @@ -543,6 +555,23 @@ def test_partition_cols_supported(self, fp, df_full): actual_partition_cols = fastparquet.ParquetFile(path, False).cats assert len(actual_partition_cols) == 2 + def test_partition_cols_string(self, fp, df_full): + # GH #23283 + partition_cols = 'bool' + df = df_full + with 
tm.ensure_clean_dir() as path: + df.to_parquet( + path, + engine="fastparquet", + partition_cols=partition_cols, + compression=None, + ) + assert os.path.exists(path) + import fastparquet # noqa: F811 + + actual_partition_cols = fastparquet.ParquetFile(path, False).cats + assert len(actual_partition_cols) == 1 + def test_partition_on_supported(self, fp, df_full): # GH #23283 partition_cols = ["bool", "int"]
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27983
2019-08-18T01:14:53Z
2019-08-18T01:16:54Z
null
2019-08-18T01:16:54Z
Read csv test
diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py index fc07f2a484102..203994b500f2c 100644 --- a/asv_bench/benchmarks/io/json.py +++ b/asv_bench/benchmarks/io/json.py @@ -4,7 +4,6 @@ from ..pandas_vb_common import BaseIO - class ReadJSON(BaseIO): fname = "__test__.json" diff --git a/pandas/tests/io/parser/test_read_csv.py b/pandas/tests/io/parser/test_read_csv.py new file mode 100644 index 0000000000000..cef1fda37d38a --- /dev/null +++ b/pandas/tests/io/parser/test_read_csv.py @@ -0,0 +1,154 @@ +import pytest +import csv +import os +import sys + +rootpath = os.path.dirname(os.path.abspath(__file__)) + +sys.path.append(os.path.join(rootpath, "pandas/io")) +from pandas.io import parsers + +TMP_PATH = "tmp" + +""" +To run test, run 'python path/to/test_parsers.py' + + test_read_csv_without_encoding_kwarg returns result of read_csv method. + - if exception is raised from method, the result returned is the exception. +""" + + +def test_read_csv_without_encoding_kwarg(file): + try: + result = parsers.read_csv(file) + except Exception as e: + result = e + return result + + +def write_csv_file(filename, data, encoding, delimiter=",", newline=""): + with open(filename, "w", newline=newline, encoding=encoding) as csv_file: + writer = csv.writer(csv_file, delimiter=delimiter) + # for row in data: + writer.writerow(data) + return filename + + +def test(): + test_results = {} + test_dtypes = [ + "ascii", + "big5", + "big5hkscs", + "cp037", + "cp273", + "cp424", + "cp437", + "cp500", + "cp720", + "cp737", + "cp775", + "cp850", + "cp852", + "cp855", + "cp856", + "cp857", + "cp858", + "cp860", + "cp861", + "cp862", + "cp863", + "cp864", + "cp865", + "cp866", + "cp869", + "cp874", + "cp875", + "cp932", + "cp949", + "cp950", + "cp1006", + "cp1026", + "cp1125", + "cp1140", + "cp1250", + "cp1251", + "cp1252", + "cp1253", + "cp1254", + "cp1255", + "cp1256", + "cp1257", + "cp1258", + "cp65001", + "euc_jp", + "euc_jis_2004", + "euc_jisx0213", + "euc_kr", + 
"gb2312", + "gbk", + "gb18030", + "hz", + "iso2022_jp", + "iso2022_jp_1", + "iso2022_jp_2", + "iso2022_jp_2004", + "iso2022_jp_3", + "iso2022_jp_ext", + "iso2022_kr", + "latin_1", + "iso8859_2", + "iso8859_3", + "iso8859_4", + "iso8859_5", + "iso8859_6", + "iso8859_7", + "iso8859_8", + "iso8859_9", + "iso8859_10", + "iso8859_11", + "iso8859_13", + "iso8859_14", + "iso8859_15", + "iso8859_16", + "johab", + "koi8_r", + "koi8_t", + "koi8_u", + "kz1048", + "mac_cyrillic", + "mac_greek", + "mac_iceland", + "mac_latin2", + "mac_roman", + "mac_turkish", + "ptcp154", + "shift_jis", + "shift_jis_2004", + "shift_jisx0213", + "utf_32", + "utf_32_be", + "utf_32_le", + "utf_16", + "utf_16_be", + "utf_16_le", + "utf_7", + "utf_8", + "utf_8_sig", + ] + + data = """ + one,two,three + 1,2,3 + uno,dos,tres + """ + for i, dtype in enumerate(test_dtypes): + file = write_csv_file(f"test{i}.csv", data, dtype) + result = test_read_csv_without_encoding_kwarg(file) + test_results[dtype] = result + + print("test results: ", test_results) + + +if __name__ == "__main__": + test()
- [ ] closes #27655 - [ 1 ] tests added / passed - [ 1 ] passes `black pandas` - *[ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Do not close issue, waiting for user response before continuing further testing. *Returns "fatal: bad revision 'upstream/master'"
https://api.github.com/repos/pandas-dev/pandas/pulls/27982
2019-08-18T00:16:23Z
2019-09-05T15:59:49Z
null
2019-09-05T15:59:49Z
DOC: Fixing flake8 error in basics.rst
diff --git a/doc/source/getting_started/basics.rst b/doc/source/getting_started/basics.rst index 3f6f56376861f..66db7bbedce2a 100644 --- a/doc/source/getting_started/basics.rst +++ b/doc/source/getting_started/basics.rst @@ -762,8 +762,15 @@ Compare the following .. code-block:: python # f, g, and h are functions taking and returning ``DataFrames`` + >>> def h(df): + ... return df * df + >>> def g(df, arg1): + ... return df + arg1 + >>> def f(df, arg2, arg3): + ... return df * (arg2 + arg3) >>> f(g(h(df), arg1=1), arg2=2, arg3=3) + with the equivalent .. code-block:: python
- [x] closes #24173 and follow up on it - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27981
2019-08-18T00:10:16Z
2019-10-13T00:32:20Z
null
2019-10-13T00:32:20Z
Issue - 27482 : fixed isnull attribute for type objects
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 34b149a6b8261..58eb83b7d57ae 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -89,7 +89,7 @@ Indexing Missing ^^^^^^^ -- +- Fixed bug where object with no data attribute were not being caught (:issue:`27482`) - - diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 6f599a6be6021..982347de69b1e 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -2,7 +2,6 @@ missing types & inference """ import numpy as np - from pandas._config import get_option from pandas._libs import lib @@ -46,7 +45,6 @@ isposinf_scalar = libmissing.isposinf_scalar isneginf_scalar = libmissing.isneginf_scalar - def isna(obj): """ Detect missing values for an array-like object. @@ -128,6 +126,12 @@ def isna(obj): def _isna_new(obj): + try: + # If any object doesn't have an attribute data + if obj._data == None: + return False + except: + pass if is_scalar(obj): return libmissing.checknull(obj) # hack (for now) because MI registers as ndarray @@ -152,6 +156,7 @@ def _isna_new(obj): elif hasattr(obj, "__array__"): return _isna_ndarraylike(np.asarray(obj)) else: + print("Test") return obj is None @@ -222,8 +227,10 @@ def _isna_ndarraylike(obj): else: values = obj + dtype = values.dtype + if is_extension: if isinstance(obj, (ABCIndexClass, ABCSeries)): values = obj._values @@ -254,7 +261,6 @@ def _isna_ndarraylike(obj): # box if isinstance(obj, ABCSeries): result = obj._constructor(result, index=obj.index, name=obj.name, copy=False) - return result diff --git a/scripts/tests/test_27482.py b/scripts/tests/test_27482.py new file mode 100644 index 0000000000000..e3b637728505b --- /dev/null +++ b/scripts/tests/test_27482.py @@ -0,0 +1,19 @@ +import pandas as pd +import pytest +import unittest + +class TestStringMethods(unittest.TestCase): + + def test1(self): + x = pd.Series([1,2,3,4]) + xt = type(x) + assert 
pd.isnull(xt)==False,"Passed" + + def test2(self): + y = pd.DataFrame({"col": [1,2,3,4]}) + yt = type(y) + assert pd.isnull(yt)==False,"Passed" + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file
- [yes] closes #27482 - [yes] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [yes] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27980
2019-08-18T00:04:08Z
2019-08-20T14:04:57Z
null
2019-08-20T14:04:57Z
Johnward
diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py index fc07f2a484102..203994b500f2c 100644 --- a/asv_bench/benchmarks/io/json.py +++ b/asv_bench/benchmarks/io/json.py @@ -4,7 +4,6 @@ from ..pandas_vb_common import BaseIO - class ReadJSON(BaseIO): fname = "__test__.json" diff --git a/pandas/tests/io/parser/test_read_csv.py b/pandas/tests/io/parser/test_read_csv.py new file mode 100644 index 0000000000000..9e13fc620707c --- /dev/null +++ b/pandas/tests/io/parser/test_read_csv.py @@ -0,0 +1,151 @@ +import pytest +import csv +import os +import sys + +rootpath = os.path.dirname(os.path.abspath(__file__)) + +sys.path.append(os.path.join(rootpath, "pandas/io")) +from pandas.io import parsers + +TMP_PATH = "tmp" + +""" +To run test, run 'python path/to/test_parsers.py' + + test_read_csv_without_encoding_kwarg returns result of read_csv method. + - if exception is raised from method, the result returned is the exception. +""" + +def test_read_csv_without_encoding_kwarg(file): + try: + result = parsers.read_csv(file) + except Exception as e: + result = e + return result + + +def write_csv_file(filename, data, encoding, delimiter=",", newline=""): + with open(filename, "w", newline=newline, encoding=encoding) as csv_file: + writer = csv.writer(csv_file, delimiter=delimiter) + # for row in data: + writer.writerow(data) + return filename + + +def test(): + test_results = {} + test_dtypes = [ + "ascii", + "big5", + "big5hkscs", + "cp037", + "cp273", + "cp424", + "cp437", + "cp500", + "cp720", + "cp737", + "cp775", + "cp850", + "cp852", + "cp855", + "cp856", + "cp857", + "cp858", + "cp860", + "cp861", + "cp862", + "cp863", + "cp864", + "cp865", + "cp866", + "cp869", + "cp874", + "cp875", + "cp932", + "cp949", + "cp950", + "cp1006", + "cp1026", + "cp1125", + "cp1140", + "cp1250", + "cp1251", + "cp1252", + "cp1253", + "cp1254", + "cp1255", + "cp1256", + "cp1257", + "cp1258", + "cp65001", + "euc_jp", + "euc_jis_2004", + "euc_jisx0213", + "euc_kr", + 
"gb2312", + "gbk", + "gb18030", + "hz", + "iso2022_jp", + "iso2022_jp_1", + "iso2022_jp_2", + "iso2022_jp_2004", + "iso2022_jp_3", + "iso2022_jp_ext", + "iso2022_kr", + "latin_1", + "iso8859_2", + "iso8859_3", + "iso8859_4", + "iso8859_5", + "iso8859_6", + "iso8859_7", + "iso8859_8", + "iso8859_9", + "iso8859_10", + "iso8859_11", + "iso8859_13", + "iso8859_14", + "iso8859_15", + "iso8859_16", + "johab", + "koi8_r", + "koi8_t", + "koi8_u", + "kz1048", + "mac_cyrillic", + "mac_greek", + "mac_iceland", + "mac_latin2", + "mac_roman", + "mac_turkish", + "ptcp154", + "shift_jis", + "shift_jis_2004", + "shift_jisx0213", + "utf_32", + "utf_32_be", + "utf_32_le", + "utf_16", + "utf_16_be", + "utf_16_le", + "utf_7", + "utf_8", + "utf_8_sig"] + + data = """ + one,two,three + 1,2,3 + uno,dos,tres + """ + for i, dtype in enumerate(test_dtypes): + file = write_csv_file(f"test{i}.csv", data, dtype) + result = test_read_csv_without_encoding_kwarg(file) + test_results[dtype] = result + + print("test results: ", test_results) + +if __name__ == "__main__": + test()
…ead_csv function, for issue 27655. - [] closes #xxxx - [ 1 ] tests added / passed - [ 1 ] passes `black pandas` - *[ 0 ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ 0 ] whatsnew entry Do not close issue, waiting for user response before further testing. * Returns: fatal: bad revision 'upstream/master'
https://api.github.com/repos/pandas-dev/pandas/pulls/27978
2019-08-17T23:46:32Z
2019-08-18T00:06:02Z
null
2019-08-18T00:06:03Z
Series.rename now handles values by Series constructor
diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index fdceaa5868cec..9e1b08879b098 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -1628,3 +1628,19 @@ when plotting a large number of points. :suppress: plt.close('all') + +Examples +~~~~~~~~ + +In order to understand how two variables are correlated, the best fit line +is a good way. You can use ``seaborn.lmplot()`` method that combines ``regplot()`` +and ``FacetGrid`` to plot data and regression model fits across a FacetGrid. + +.. ipython:: python + :suppress: + + import seaborn as sns + df4 = pd.DataFrame({'a': np.random.randn(100) + 1, 'b': np.random.randn(100), + 'c': np.random.randn(100) - 1}, columns=['a', 'b', 'c']) + + sns.lmplot(x="a", y="b", data=df4) diff --git a/pandas/core/series.py b/pandas/core/series.py index c891298d6e499..6ea8f6db60e92 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4165,12 +4165,10 @@ def rename(self, index=None, **kwargs): """ kwargs["inplace"] = validate_bool_kwarg(kwargs.get("inplace", False), "inplace") - non_mapping = is_scalar(index) or ( - is_list_like(index) and not is_dict_like(index) - ) - if non_mapping: + if callable(index) or is_dict_like(index): + return super().rename(index=index, **kwargs) + else: return self._set_name(index, inplace=kwargs.get("inplace")) - return super().rename(index=index, **kwargs) @Substitution(**_shared_doc_kwargs) @Appender(generic.NDFrame.reindex.__doc__) diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 6fc70e9f4a737..86196b50c4726 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -269,6 +269,10 @@ def read_parquet(path, engine="auto", columns=None, **kwargs): expected. A local file could be: ``file://localhost/path/to/table.parquet``. 
+ A file path can also be a directory name that contains multiple(partitioned) + parquet files (in addition to single file path). A directory path could be: + ``directory://usr/path/to/folder``. + If you want to pass in a path object, pandas accepts any ``os.PathLike``.
- [x] closes #27813 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27975
2019-08-17T20:47:15Z
2019-08-27T22:19:16Z
null
2019-08-27T22:19:16Z
test case for transform with fillna #27905
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index d3972e6ba9008..2093fd28291b2 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -1074,3 +1074,32 @@ def test_transform_lambda_with_datetimetz(): name="time", ) assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "inputDF, expectedDF", + [ + ( + DataFrame( + { + "A": [121, 121, 121, 121, 231, 231, 676], + "B": [1, 2, np.nan, 3, 3, np.nan, 4], + } + ), + 1, + ), + ( + DataFrame( + { + "A": [121, 121, 121, 121, 231, 231, 676], + "B": [1.0, 2.0, 2.0, 3.0, 3.0, 3.0, 4.0], + } + ), + 1, + ), + ], +) +def test_groupby_transform_fillna(inputDF, expectedDF): + # GH 27905 - Test fillna in groupby.transform + input1 = inputDF.groupby("A").transform(lambda x: x.fillna(x.mean())) + assert all(input1 == expectedDF)
- [x] closes #27905 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27974
2019-08-17T20:26:36Z
2019-10-22T01:37:11Z
null
2019-10-22T01:37:11Z
DOC: Added periods to end of docstrings in explode function
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 603a615c1f8cb..45a9869268cb8 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6177,14 +6177,14 @@ def stack(self, level=-1, dropna=True): def explode(self, column: Union[str, Tuple]) -> "DataFrame": """ - Transform each element of a list-like to a row, replicating the - index values. + Transform each element of a list-like to a row, replicating index values. .. versionadded:: 0.25.0 Parameters ---------- column : str or tuple + Column to explode. Returns ------- @@ -6200,8 +6200,8 @@ def explode(self, column: Union[str, Tuple]) -> "DataFrame": See Also -------- DataFrame.unstack : Pivot a level of the (necessarily hierarchical) - index labels - DataFrame.melt : Unpivot a DataFrame from wide format to long format + index labels. + DataFrame.melt : Unpivot a DataFrame from wide format to long format. Series.explode : Explode a DataFrame from list-like columns to long format. Notes diff --git a/pandas/core/series.py b/pandas/core/series.py index 3f04970ee4e58..441b59aabb704 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3620,7 +3620,7 @@ def explode(self) -> "Series": Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. - DataFrame.melt : Unpivot a DataFrame from wide format to long format + DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format.
xref #23630
https://api.github.com/repos/pandas-dev/pandas/pulls/27973
2019-08-17T19:52:49Z
2019-08-26T02:21:36Z
2019-08-26T02:21:36Z
2019-08-30T15:37:19Z
Added Engine Disposal Documentation
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 63dd56f4a3793..4ec35c35f70f5 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -72,7 +72,6 @@ I/O - Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`) - Better error message when a negative header is passed in :func:`pandas.read_csv` (:issue:`27779`) -- Follow the ``min_rows`` display option (introduced in v0.25.0) correctly in the HTML repr in the notebook (:issue:`27991`). Plotting ^^^^^^^^ diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 44cb399336d62..97ed4cc3df1f8 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -140,10 +140,14 @@ def execute(sql, con, cur=None, params=None): ---------- sql : string SQL query to be executed. - con : SQLAlchemy connectable(engine/connection) or sqlite3 connection - Using SQLAlchemy makes it possible to use any DB supported by the - library. - If a DBAPI2 object, only sqlite3 is supported. + con : SQLAlchemy connectable (engine/connection) or database string URI + or DBAPI2 connection (fallback mode) + Using SQLAlchemy makes it possible to use any DB supported by that + library. Legacy support is provided for sqlite3.Connection objects. + + Note: The user is responsible for engine disposal and connection + closure for the SQLAlchemy connectable. See `EngineDisposal + <https://docs.sqlalchemy.org/en/13/core/connections.html?highlight=engine#engine-disposal>`_. cur : deprecated, cursor is obtained from connection, default: None params : list or tuple, optional, default: None List of parameters to pass to execute method. @@ -187,6 +191,10 @@ def read_sql_table( con : SQLAlchemy connectable or str A database URI could be provided as as str. SQLite DBAPI connection mode not supported. + + Note: The user is responsible for engine disposal and connection + closure for the SQLAlchemy connectable. 
See `EngineDisposal + <https://docs.sqlalchemy.org/en/13/core/connections.html?highlight=engine#engine-disposal>`_. schema : str, default None Name of SQL schema in database to query (if database flavor supports this). Uses default schema if None (default). @@ -280,11 +288,15 @@ def read_sql_query( ---------- sql : string SQL query or SQLAlchemy Selectable (select or text object) SQL query to be executed. - con : SQLAlchemy connectable(engine/connection), database string URI, + con : SQLAlchemy connectable(engine/connection) or database string URI or sqlite3 DBAPI2 connection - Using SQLAlchemy makes it possible to use any DB supported by that - library. - If a DBAPI2 object, only sqlite3 is supported. + Using SQLAlchemy makes it possible to use any DB supported by + that library. Legacy support is provided for sqlite3.Connection + objects. + + Note: The user is responsible for engine disposal and connection + closure for the SQLAlchemy connectable. See `EngineDisposal + <https://docs.sqlalchemy.org/en/13/core/connections.html?highlight=engine#engine-disposal>`_. index_col : string or list of strings, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : boolean, default True @@ -360,9 +372,12 @@ def read_sql( SQL query to be executed or a table name. con : SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) - Using SQLAlchemy makes it possible to use any DB supported by that - library. If a DBAPI2 object, only sqlite3 is supported. + library. Legacy support is provided for sqlite3.Connection objects. + + Note: The user is responsible for engine disposal and connection + closure for the SQLAlchemy connectable. See `EngineDisposal + <https://docs.sqlalchemy.org/en/13/core/connections.html?highlight=engine#engine-disposal>`_. index_col : string or list of strings, optional, default: None Column(s) to set as index(MultiIndex). 
coerce_float : boolean, default True @@ -460,10 +475,6 @@ def to_sql( Name of SQL table. con : SQLAlchemy connectable(engine/connection) or database string URI or sqlite3 DBAPI2 connection - Using SQLAlchemy makes it possible to use any DB supported by that - library. - If a DBAPI2 object, only sqlite3 is supported. - schema : str, optional Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). if_exists : {'fail', 'replace', 'append'}, default 'fail'
Created hyperlink to SQLAlchemy docs for Engine Disposal responsibilities of user. Unified wording of legacy support for sqlite - [x] closes #23086 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27972
2019-08-17T19:17:56Z
2019-10-22T01:42:37Z
null
2019-10-22T01:42:37Z
Added try-except clause to catch numpy error.
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 3126b9d9d3e2e..51535db9208a0 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -223,7 +223,9 @@ def init_dict(data, index, columns, dtype=None): # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): - if dtype is None or np.issubdtype(dtype, np.flexible): + if is_categorical_dtype(dtype): + nan_dtype = dtype + elif dtype is None or np.issubdtype(dtype, np.flexible): # GH#1783 nan_dtype = object else: diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 00be13b1c0e72..aa0bc21ec78ee 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -69,6 +69,10 @@ def test_empty_frame_dtypes_ftypes(self): norows_df = pd.DataFrame(columns=list("abc")) assert_series_equal(norows_df.dtypes, pd.Series(np.object, index=list("abc"))) + cat = pd.CategoricalDtype() + norows_cat_df = pd.DataFrame(columns=list("abc"), dtype=cat) + assert_series_equal(norows_cat_df.dtypes, pd.Series(cat, index=list("abc"))) + # GH 26705 - Assert .ftypes is deprecated with tm.assert_produces_warning(FutureWarning): assert_series_equal(
- [x] closes #27953 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27970
2019-08-17T18:38:32Z
2019-10-03T19:11:07Z
null
2019-10-03T19:11:07Z
issue 27904
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index ea2bd22cccc3d..23f832464e6ce 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1143,6 +1143,11 @@ def nunique(self, dropna=True): val = self.obj._internal_get_values() + if dropna: + idx = notna(val) + val = val[idx] + ids = ids[idx] + try: sorter = np.lexsort((val, ids)) except TypeError: # catches object dtypes
- [x] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27969
2019-08-17T18:37:15Z
2019-08-17T20:39:28Z
null
2023-05-11T01:19:10Z
BUG: don't cache pandas matplotlib converters (#27036)
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 15648d59c8f98..59e66635c2553 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -67,7 +67,13 @@ def register(explicit=True): converter = cls() if type_ in units.registry: previous = units.registry[type_] - _mpl_units[type_] = previous + # Exclude our converters from caching to make this idempotent. + if type(previous) not in { + DatetimeConverter, + PeriodConverter, + TimeConverter, + }: + _mpl_units[type_] = previous units.registry[type_] = converter @@ -80,9 +86,7 @@ def deregister(): # restore the old keys for unit, formatter in _mpl_units.items(): - if type(formatter) not in {DatetimeConverter, PeriodConverter, TimeConverter}: - # make it idempotent by excluding ours. - units.registry[unit] = formatter + units.registry[unit] = formatter def _check_implicitly_registered():
- [ ] closes #27036 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry #27036 is caused by pandas converters replacing default converters in the cache when we register our matplotlib converters.
https://api.github.com/repos/pandas-dev/pandas/pulls/27968
2019-08-17T18:19:34Z
2019-08-23T03:13:24Z
null
2019-08-23T03:13:24Z
DOC: Added documentation to name and fastpath arguments in series.py
diff --git a/pandas/core/series.py b/pandas/core/series.py index 3f04970ee4e58..a0fa7ab84e44c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -170,8 +170,13 @@ class Series(base.IndexOpsMixin, generic.NDFrame): Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. + name : str, default None + Sets the name of the array/ series. Can be called with Series.name copy : bool, default False Copy input data. + fastpath : bool, default False + Internally used to prevent the over-parsing data format/ type which + is otherwise required by external calls (such as user calls) """ _metadata = ["name"]
- [X] closes #27178 - [N/A] tests added / passed - [N/A] passes `black pandas` - [N/A] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [N/A] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27967
2019-08-17T17:44:28Z
2019-08-19T14:01:21Z
null
2019-08-19T14:01:34Z
DOC: Examples added for different types of plots in visualizations user guide
diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index fdceaa5868cec..91b1285a2e791 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -1628,3 +1628,36 @@ when plotting a large number of points. :suppress: plt.close('all') + +Examples +~~~~~~~~ + +.. ipython:: python + + import seaborn as sns + +In order to understand how two variables are correlated, the best fit line +is a good way. You can use ``seaborn.lmplot()`` method that combines ``regplot()`` +and ``FacetGrid`` to plot data and regression model fits across a FacetGrid. + +.. ipython:: python + :suppress: + + df4 = pd.DataFrame({'a': np.random.randn(100) + 1, 'b': np.random.randn(100), + 'c': np.random.randn(100) - 1}, columns=['a', 'b', 'c']) + + sns.lmplot(x="a", y="b", data=df4) + + +To understand the correlation between the possible pairs of numeric variables i.e. +bivariate analysis, we can use ``seaborn.pairplot()`` method for a pairwise plot. + +.. 
ipython:: python + :suppress: + + df = sns.load_dataset('iris') + + plt.figure(figsize=(10, 8), dpi=80) + sns.pairplot(df, kind='scatter', hue='species', plot_kws=dict(s=80, + edgecolor="white", linewidth=2.5)) + plt.show() diff --git a/pandas/core/series.py b/pandas/core/series.py index 3f04970ee4e58..8b6c963e40e9d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4165,12 +4165,10 @@ def rename(self, index=None, **kwargs): """ kwargs["inplace"] = validate_bool_kwarg(kwargs.get("inplace", False), "inplace") - non_mapping = is_scalar(index) or ( - is_list_like(index) and not is_dict_like(index) - ) - if non_mapping: + if callable(index) or is_dict_like(index): + return super().rename(index=index, **kwargs) + else: return self._set_name(index, inplace=kwargs.get("inplace")) - return super().rename(index=index, **kwargs) @Substitution(**_shared_doc_kwargs) @Appender(generic.NDFrame.reindex.__doc__) diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 6fc70e9f4a737..86196b50c4726 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -269,6 +269,10 @@ def read_parquet(path, engine="auto", columns=None, **kwargs): expected. A local file could be: ``file://localhost/path/to/table.parquet``. + A file path can also be a directory name that contains multiple(partitioned) + parquet files (in addition to single file path). A directory path could be: + ``directory://usr/path/to/folder``. + If you want to pass in a path object, pandas accepts any ``os.PathLike``.
- [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27966
2019-08-17T17:21:46Z
2019-08-19T14:05:43Z
null
2019-08-19T14:05:44Z
updated default quoting on to_csv function to csv.QUOTE_NONNUMERIC
diff --git a/Pipfile b/Pipfile new file mode 100644 index 0000000000000..348dd30b57174 --- /dev/null +++ b/Pipfile @@ -0,0 +1,13 @@ +[[source]] +name = "pypi" +url = "https://pypi.org/simple" +verify_ssl = true + +[dev-packages] + +[packages] +cython = "*" +numpy = "*" + +[requires] +python_version = "3.7" diff --git a/Pipfile.lock b/Pipfile.lock new file mode 100644 index 0000000000000..e337df5d95879 --- /dev/null +++ b/Pipfile.lock @@ -0,0 +1,77 @@ +{ + "_meta": { + "hash": { + "sha256": "37b893c747c9087a0b0c9f836cde1aeb0f90fa4392afcb378b02af41b5a6ba0a" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.7" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "cython": { + "hashes": [ + "sha256:07efba7b32c082c519b75e3b03821c2f32848e2b3e9986c784bbd8ffaf0666d7", + "sha256:08db41daf18fabf7b7a85e39aa26954f6246994540043194af026c0df65a4942", + "sha256:19bbe3caf885a1d2e2c30eacc10d1e45dbbefb156493fe1d5d1adc1668cc1269", + "sha256:1c574f2f2ba760b82b2bcf6262e77e75589247dc5ef796a3ff1b2213e50ee452", + "sha256:1dfe672c686e34598bdbaa93c3b30acb3720ae9258232a4f68ba04ee9969063d", + "sha256:283faea84e6c4e54c3f5c8ff89aa2b6c1c3a813aad4f6d48ed3b9cc9043ef9f9", + "sha256:2a145888d0942e7c36e86a7b7c7e2923cb9f7055805a3b72dcb137e3efdb0979", + "sha256:3f75065936e16569d6e13dfd76de988f5eabeae460aa54770c9b961ab6f747fc", + "sha256:4d78124f5f281f1d5d5b7919cbbc65a7073ff93562def81ee78a8307e6e72494", + "sha256:5ba4d088b8e5d59b8a5911ca9c72952acf3c83296b57daf75af92fb2af1e8423", + "sha256:6b19daeda1d5d1dfc973b291246f6a63a663b20c33980724d6d073c562719536", + "sha256:790c7dc80fd1c3e38acefe06027e2f5a8466c128c7e47c6e140fd5316132574d", + "sha256:7f8c4e648881454ba3ba0bcf3b21a9e1878a67d20ea2b8d9ec1c4c628592ab6b", + "sha256:8bcd3f597290f9902548d6355898d7e376e7f3762f89db9cd50b2b58429df9e8", + "sha256:8ffb18f71972a5c718a8600d9f52e3507f0d6fb72a978e03270d34a7035c98fb", + 
"sha256:92f025df1cb391e09f65775598c7dfb7efad72d74713775db54e267f62ca94a1", + "sha256:93cf1c72472a2fd0ef4c52f6074dab08fc28d475b9c824ba73a52701f7a48ae1", + "sha256:9a7fa692cdc967fdbf6a053c1975137d01f6935dede2ef222c71840b290caf79", + "sha256:a68eb0c1375f2401de881692b30370a51e550052b8e346b2f71bbdbdc74a214f", + "sha256:ac3b7a12ddd52ea910ee3a041e6bc65df7a52f0ba7bd10fb7123502af482c152", + "sha256:b402b700edaf571a0bae18ec35d5b71c266873a6616412b672435c10b6d8f041", + "sha256:c29d069a4a30f472482343c866f7486731ad638ef9af92bfe5fca9c7323d638e", + "sha256:d822311498f185db449b687336b4e5db7638c8d8b03bdf10ae91d74e23c7cc0c", + "sha256:dccc8df9e1ac158b06777bbaaeb4516f245f9b147701ae25e6023960e4a0c2a3", + "sha256:e31f4b946c2765b2f35440fdb4b00c496dfc5babc53c7ae61966b41171d1d59f", + "sha256:eb43f9e582cc221ee2832e25ea6fe5c06f2acc9da6353c562e922f107db12af8", + "sha256:f07822248110fd6213db8bc2745fdbbccef6f2b3d18ac91a7fba29c6bc575da5", + "sha256:ff69854f123b959d4ae14bd5330714bb9ee4360052992dc0fbd0a3dee4261f95" + ], + "index": "pypi", + "version": "==0.29.13" + }, + "numpy": { + "hashes": [ + "sha256:03e311b0a4c9f5755da7d52161280c6a78406c7be5c5cc7facfbcebb641efb7e", + "sha256:0cdd229a53d2720d21175012ab0599665f8c9588b3b8ffa6095dd7b90f0691dd", + "sha256:312bb18e95218bedc3563f26fcc9c1c6bfaaf9d453d15942c0839acdd7e4c473", + "sha256:464b1c48baf49e8505b1bb754c47a013d2c305c5b14269b5c85ea0625b6a988a", + "sha256:5adfde7bd3ee4864536e230bcab1c673f866736698724d5d28c11a4d63672658", + "sha256:7724e9e31ee72389d522b88c0d4201f24edc34277999701ccd4a5392e7d8af61", + "sha256:8d36f7c53ae741e23f54793ffefb2912340b800476eb0a831c6eb602e204c5c4", + "sha256:910d2272403c2ea8a52d9159827dc9f7c27fb4b263749dca884e2e4a8af3b302", + "sha256:951fefe2fb73f84c620bec4e001e80a80ddaa1b84dce244ded7f1e0cbe0ed34a", + "sha256:9588c6b4157f493edeb9378788dcd02cb9e6a6aeaa518b511a1c79d06cbd8094", + "sha256:9ce8300950f2f1d29d0e49c28ebfff0d2f1e2a7444830fbb0b913c7c08f31511", + 
"sha256:be39cca66cc6806652da97103605c7b65ee4442c638f04ff064a7efd9a81d50a", + "sha256:c3ab2d835b95ccb59d11dfcd56eb0480daea57cdf95d686d22eff35584bc4554", + "sha256:eb0fc4a492cb896346c9e2c7a22eae3e766d407df3eb20f4ce027f23f76e4c54", + "sha256:ec0c56eae6cee6299f41e780a0280318a93db519bbb2906103c43f3e2be1206c", + "sha256:f4e4612de60a4f1c4d06c8c2857cdcb2b8b5289189a12053f37d3f41f06c60d0" + ], + "index": "pypi", + "version": "==1.17.0" + } + }, + "develop": {} +} diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ba1c516b9b444..77a4c1f01076d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1,3 +1,4 @@ +import csv import collections from datetime import timedelta import functools @@ -3062,7 +3063,7 @@ def to_csv( mode="w", encoding=None, compression="infer", - quoting=None, + quoting=csv.QUOTE_NONNUMERIC, quotechar='"', line_terminator=None, chunksize=None, @@ -3129,9 +3130,9 @@ def to_csv( 'infer' option added and set to default. quoting : optional constant from csv module - Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` - then floats are converted to strings and thus csv.QUOTE_NONNUMERIC - will treat them as non-numeric. + Defaults to csv.QUOTE_NONNUMERIC will treat values as non-numeric. + If set to csv.QUOTE_MINIMAL, then if you have set a `float_format` + then floats are converted to strings. quotechar : str, default '\"' String of length 1. Character used to quote fields. line_terminator : str, optional diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 6fc70e9f4a737..958198a56a9c4 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -264,8 +264,9 @@ def read_parquet(path, engine="auto", columns=None, **kwargs): Parameters ---------- path : str, path object or file-like object - Any valid string path is acceptable. The string could be a URL. Valid - URL schemes include http, ftp, s3, and file. For file URLs, a host is + Any valid string path is acceptable. 
Path to directory of partitioned + parquet files is acceptable. The string could be a URL. Valid URL + schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.parquet``. diff --git a/test.csv b/test.csv new file mode 100644 index 0000000000000..40bf71fd6fa56 --- /dev/null +++ b/test.csv @@ -0,0 +1,3 @@ +"","a","b" +0,"a1 ","b1" +1,"a2","b2"
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27965
2019-08-17T17:14:53Z
2019-08-23T13:46:36Z
null
2019-08-23T13:46:36Z
added names, fastpath parameters explanation to pandas.Series
diff --git a/pandas/core/series.py b/pandas/core/series.py index 3f04970ee4e58..b6fc9ae82048b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -170,6 +170,8 @@ class Series(base.IndexOpsMixin, generic.NDFrame): Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. + name : str, optional + The name to give to the Series. copy : bool, default False Copy input data. """
- [x] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27964
2019-08-17T16:56:23Z
2019-11-08T16:42:42Z
2019-11-08T16:42:41Z
2019-11-08T16:42:47Z
Fix/temp azure
diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 263a87176a9c9..6846791b0d595 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -122,7 +122,7 @@ jobs: - script: | source activate pandas-dev # Next we should simply have `doc/make.py --warnings-are-errors`, everything else is required because the ipython directive doesn't fail the build on errors (https://github.com/ipython/ipython/issues/11547) - doc/make.py --warnings-are-errors | tee sphinx.log ; SPHINX_RET=${PIPESTATUS[0]} + doc/make.py --num-jobs=2 --warnings-are-errors | tee sphinx.log ; SPHINX_RET=${PIPESTATUS[0]} grep -B1 "^<<<-------------------------------------------------------------------------$" sphinx.log ; IPY_RET=$(( $? != 1 )) exit $(( $SPHINX_RET + $IPY_RET )) displayName: 'Build documentation' @@ -149,21 +149,21 @@ jobs: # 4. Upload the private key (the name of the file must match with the specified in "sshKeySecureFile" input below, "pandas_docs_key") # 5. Click on file name after it is created, tick the box "Authorize for use in all pipelines" and save # 6. 
The public key specified in "sshPublicKey" is the pair of the uploaded private key, and needs to be set as a deploy key of the repo where the docs will be pushed (with write access): https://github.com/pandas-dev/pandas-dev.github.io/settings/keys - - task: InstallSSHKey@0 - inputs: - hostName: 'github.com,192.30.252.128 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==' - sshPublicKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDHmz3l/EdqrgNxEUKkwDUuUcLv91unig03pYFGO/DMIgCmPdMG96zAgfnESd837Rm0wSSqylwSzkRJt5MV/TpFlcVifDLDQmUhqCeO8Z6dLl/oe35UKmyYICVwcvQTAaHNnYRpKC5IUlTh0JEtw9fGlnp1Ta7U1ENBLbKdpywczElhZu+hOQ892zqOj3CwA+U2329/d6cd7YnqIKoFN9DWT3kS5K6JE4IoBfQEVekIOs23bKjNLvPoOmi6CroAhu/K8j+NCWQjge5eJf2x/yTnIIP1PlEcXoHIr8io517posIx3TBup+CN8bNS1PpDW3jyD3ttl1uoBudjOQrobNnJeR6Rn67DRkG6IhSwr3BWj8alwUG5mTdZzwV5Pa9KZFdIiqX7NoDGg+itsR39QCn0thK8lGRNSR8KrWC1PSjecwelKBO7uQ7rnk/rkrZdBWR4oEA8YgNH8tirUw5WfOr5a0AIaJicKxGKNdMxZt+zmC+bS7F4YCOGIm9KHa43RrKhoGRhRf9fHHHKUPwFGqtWG4ykcUgoamDOURJyepesBAO3FiRE9rLU6ILbB3yEqqoekborHmAJD5vf7PWItW3Q/YQKuk3kkqRcKnexPyzyyq5lUgTi8CxxZdaASIOu294wjBhhdyHlXEkVTNJ9JKkj/obF+XiIIp0cBDsOXY9hDQ== pandas-dev@python.org' - sshKeySecureFile: 'pandas_docs_key' - displayName: 'Install GitHub ssh deployment key' - condition : | - and(not(eq(variables['Build.Reason'], 'PullRequest')), - eq(variables['Build.SourceBranch'], 'refs/heads/master')) - - - script: | - cd doc/build/html - git remote add origin git@github.com:pandas-dev/pandas-dev.github.io.git - git push -f origin master - displayName: 'Publish docs to GitHub pages' - condition : | - and(not(eq(variables['Build.Reason'], 'PullRequest')), - eq(variables['Build.SourceBranch'], 
'refs/heads/master')) +# - task: InstallSSHKey@0 +# inputs: +# hostName: 'github.com,192.30.252.128 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==' +# sshPublicKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDHmz3l/EdqrgNxEUKkwDUuUcLv91unig03pYFGO/DMIgCmPdMG96zAgfnESd837Rm0wSSqylwSzkRJt5MV/TpFlcVifDLDQmUhqCeO8Z6dLl/oe35UKmyYICVwcvQTAaHNnYRpKC5IUlTh0JEtw9fGlnp1Ta7U1ENBLbKdpywczElhZu+hOQ892zqOj3CwA+U2329/d6cd7YnqIKoFN9DWT3kS5K6JE4IoBfQEVekIOs23bKjNLvPoOmi6CroAhu/K8j+NCWQjge5eJf2x/yTnIIP1PlEcXoHIr8io517posIx3TBup+CN8bNS1PpDW3jyD3ttl1uoBudjOQrobNnJeR6Rn67DRkG6IhSwr3BWj8alwUG5mTdZzwV5Pa9KZFdIiqX7NoDGg+itsR39QCn0thK8lGRNSR8KrWC1PSjecwelKBO7uQ7rnk/rkrZdBWR4oEA8YgNH8tirUw5WfOr5a0AIaJicKxGKNdMxZt+zmC+bS7F4YCOGIm9KHa43RrKhoGRhRf9fHHHKUPwFGqtWG4ykcUgoamDOURJyepesBAO3FiRE9rLU6ILbB3yEqqoekborHmAJD5vf7PWItW3Q/YQKuk3kkqRcKnexPyzyyq5lUgTi8CxxZdaASIOu294wjBhhdyHlXEkVTNJ9JKkj/obF+XiIIp0cBDsOXY9hDQ== pandas-dev@python.org' +# sshKeySecureFile: 'pandas_docs_key' +# displayName: 'Install GitHub ssh deployment key' +# condition : | +# and(not(eq(variables['Build.Reason'], 'PullRequest')), +# eq(variables['Build.SourceBranch'], 'refs/heads/master')) +# +# - script: | +# cd doc/build/html +# git remote add origin git@github.com:pandas-dev/pandas-dev.github.io.git +# git push -f origin master +# displayName: 'Publish docs to GitHub pages' +# condition : | +# and(not(eq(variables['Build.Reason'], 'PullRequest')), +# eq(variables['Build.SourceBranch'], 'refs/heads/master')) diff --git a/ci/setup_env.sh b/ci/setup_env.sh index 88742e0483c7e..88f09d0175fd9 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -116,7 +116,7 @@ conda list pandas # Make sure any error 
below is reported as such echo "Build extensions and install pandas" -python setup.py build_ext -q --inplace +python setup.py build_ext -q --inplace -j 2 python -m pip install -e . echo
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27963
2019-08-17T15:13:06Z
2019-08-17T15:13:23Z
null
2019-08-17T15:13:23Z
BUG: TimedeltaArray - Index result.name
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b983117478c61..415255cdbad06 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2325,7 +2325,10 @@ def __sub__(self, other): return Index(np.array(self) - other) def __rsub__(self, other): - return Index(other - np.array(self)) + # wrap Series to ensure we pin name correctly + from pandas import Series + + return Index(other - Series(self)) def __and__(self, other): return self.intersection(other) diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index a3bfb2e10bb66..523ba5d42a69c 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -74,8 +74,9 @@ def masked_arith_op(x, y, op): result[mask] = op(xrav[mask], yrav[mask]) else: - assert is_scalar(y), type(y) - assert isinstance(x, np.ndarray), type(x) + if not is_scalar(y): + raise TypeError(type(y)) + # mask is only meaningful for x result = np.empty(x.size, dtype=x.dtype) mask = notna(xrav) diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 33a5d45df3885..6d6b85a1e81e1 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -1378,8 +1378,12 @@ def test_td64arr_add_offset_array(self, box): @pytest.mark.parametrize( "names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")] ) - def test_td64arr_sub_offset_index(self, names, box): + def test_td64arr_sub_offset_index(self, names, box_with_array): # GH#18824, GH#19744 + box = box_with_array + xbox = box if box is not tm.to_array else pd.Index + exname = names[2] if box is not tm.to_array else names[1] + if box is pd.DataFrame and names[1] == "bar": pytest.skip( "Name propagation for DataFrame does not behave like " @@ -1390,11 +1394,11 @@ def test_td64arr_sub_offset_index(self, names, box): other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1]) expected = 
TimedeltaIndex( - [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=names[2] + [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname ) tdi = tm.box_expected(tdi, box) - expected = tm.box_expected(expected, box) + expected = tm.box_expected(expected, xbox) # The DataFrame operation is transposed and so operates as separate # scalar operations, which do not issue a PerformanceWarning
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27962
2019-08-17T02:59:37Z
2019-08-19T21:01:51Z
2019-08-19T21:01:50Z
2019-08-19T21:04:50Z
pandas.read_parquet now has info that it accepts path_to_directory that contains multiple parquet files
diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 6fc70e9f4a737..86196b50c4726 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -269,6 +269,10 @@ def read_parquet(path, engine="auto", columns=None, **kwargs): expected. A local file could be: ``file://localhost/path/to/table.parquet``. + A file path can also be a directory name that contains multiple(partitioned) + parquet files (in addition to single file path). A directory path could be: + ``directory://usr/path/to/folder``. + If you want to pass in a path object, pandas accepts any ``os.PathLike``.
- [x] closes #27820 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27961
2019-08-17T01:41:48Z
2019-10-22T01:45:52Z
null
2019-10-22T01:45:52Z
DOC: Add missing parameters to Series constructor
diff --git a/pandas/core/series.py b/pandas/core/series.py index 3f04970ee4e58..da62985019f4d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -170,8 +170,14 @@ class Series(base.IndexOpsMixin, generic.NDFrame): Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. + name : str + This is the variable name of the series. If it in a DataFrame, the column + will be named according to this name parameter. + copy : bool, default False Copy input data. + + fastpath: internal paraemter """ _metadata = ["name"]
Added documentation to pandas.Series for "name" and "fastpath" parameters.
https://api.github.com/repos/pandas-dev/pandas/pulls/27960
2019-08-17T01:00:28Z
2019-08-27T22:25:13Z
null
2019-08-27T22:25:13Z
REF: use dispatch_to_extension_op for bool ops
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index dbcf09a401f27..e917a5c999238 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -818,6 +818,11 @@ def wrapper(self, other): # Defer to DataFrame implementation; fail early return NotImplemented + elif should_extension_dispatch(self, other): + # e.g. SparseArray + res_values = dispatch_to_extension_op(op, self, other) + return _construct_result(self, res_values, index=self.index, name=res_name) + elif isinstance(other, (ABCSeries, ABCIndexClass)): is_other_int_dtype = is_integer_dtype(other.dtype) other = fill_int(other) if is_other_int_dtype else fill_bool(other)
Along with #27912 this completes the process of doing this for all Series ops. From there we can move the array-specific components to array_ops and define the PandasArray ops appropriately.
https://api.github.com/repos/pandas-dev/pandas/pulls/27959
2019-08-17T00:26:59Z
2019-09-02T21:26:54Z
2019-09-02T21:26:53Z
2019-09-03T01:50:14Z
TST: update pandas_datareader dep, fix xfail
diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index 19002cbb8575e..fc83fcad88fb6 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -48,5 +48,5 @@ dependencies: - pip: - brotlipy - coverage - - pandas-datareader + - pandas-datareader>=0.7.4 - python-dateutil diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 93baafddedeb4..2c06d20e4cd6c 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -103,7 +103,6 @@ def test_pandas_gbq(df): pandas_gbq = import_module("pandas_gbq") # noqa -@pytest.mark.xfail(reason="0.7.0 pending") @tm.network def test_pandas_datareader():
Locally the test fails because Quandl wants an API key, so fixing this test may be a multi-step process
https://api.github.com/repos/pandas-dev/pandas/pulls/27958
2019-08-16T22:50:01Z
2019-08-20T23:40:37Z
null
2021-11-20T23:22:01Z
Backport PR #27956 on branch 0.25.x (TST: xfail on 37, win)
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 9b8c8e6d8a077..99cc4cf0ffbd1 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -4,6 +4,8 @@ import numpy as np import pytest +from pandas.compat import PY37, is_platform_windows + import pandas as pd from pandas import ( Categorical, @@ -208,6 +210,9 @@ def test_level_get_group(observed): # GH#21636 previously flaky on py37 +@pytest.mark.xfail( + is_platform_windows() and PY37, reason="Flaky, GH-27902", strict=False +) @pytest.mark.parametrize("ordered", [True, False]) def test_apply(ordered): # GH 10138
Backport PR #27956: TST: xfail on 37, win
https://api.github.com/repos/pandas-dev/pandas/pulls/27957
2019-08-16T20:07:19Z
2019-08-17T20:20:30Z
2019-08-17T20:20:30Z
2019-08-17T20:20:31Z
TST: xfail on 37, win
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 756de3edd33dd..b5c2de267869d 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -4,6 +4,8 @@ import numpy as np import pytest +from pandas.compat import PY37, is_platform_windows + import pandas as pd from pandas import ( Categorical, @@ -208,6 +210,9 @@ def test_level_get_group(observed): # GH#21636 previously flaky on py37 +@pytest.mark.xfail( + is_platform_windows() and PY37, reason="Flaky, GH-27902", strict=False +) @pytest.mark.parametrize("ordered", [True, False]) def test_apply(ordered): # GH 10138
Closes https://github.com/pandas-dev/pandas/issues/27902
https://api.github.com/repos/pandas-dev/pandas/pulls/27956
2019-08-16T19:16:26Z
2019-08-16T20:06:36Z
2019-08-16T20:06:35Z
2019-08-16T20:06:39Z
BUG: Remove null values before sorting during groupby nunique calculation
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 58892b316c940..2f72de25c579b 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -97,7 +97,7 @@ Datetimelike - Bug in :meth:`Series.__setitem__` incorrectly casting ``np.timedelta64("NaT")`` to ``np.datetime64("NaT")`` when inserting into a :class:`Series` with datetime64 dtype (:issue:`27311`) - Bug in :meth:`Series.dt` property lookups when the underlying data is read-only (:issue:`27529`) - Bug in ``HDFStore.__getitem__`` incorrectly reading tz attribute created in Python 2 (:issue:`26443`) -- +- Bug in :meth:`pandas.core.groupby.SeriesGroupBy.nunique` where ``NaT`` values were interfering with the count of unique values (:issue:`27951`) Timedelta diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index c0436e9389078..e514162f84c37 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1147,6 +1147,10 @@ def nunique(self, dropna=True): val = self.obj._internal_get_values() + # GH 27951 + # temporary fix while we wait for NumPy bug 12629 to be fixed + val[isna(val)] = np.datetime64("NaT") + try: sorter = np.lexsort((val, ids)) except TypeError: # catches object dtypes diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index d89233f2fd603..afb22a732691c 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1,4 +1,5 @@ import builtins +import datetime as dt from io import StringIO from itertools import product from string import ascii_lowercase @@ -9,7 +10,16 @@ from pandas.errors import UnsupportedFunctionCall import pandas as pd -from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna +from pandas import ( + DataFrame, + Index, + MultiIndex, + NaT, + Series, + Timestamp, + date_range, + isna, +) import pandas.core.nanops as nanops from pandas.util import _test_decorators as td, 
testing as tm @@ -1015,6 +1025,42 @@ def test_nunique_with_timegrouper(): tm.assert_series_equal(result, expected) +@pytest.mark.parametrize( + "key, data, dropna, expected", + [ + ( + ["x", "x", "x"], + [Timestamp("2019-01-01"), NaT, Timestamp("2019-01-01")], + True, + Series([1], index=pd.Index(["x"], name="key"), name="data"), + ), + ( + ["x", "x", "x"], + [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], + True, + Series([1], index=pd.Index(["x"], name="key"), name="data"), + ), + ( + ["x", "x", "x", "y", "y"], + [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], + False, + Series([2, 2], index=pd.Index(["x", "y"], name="key"), name="data"), + ), + ( + ["x", "x", "x", "x", "y"], + [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], + False, + Series([2, 1], index=pd.Index(["x", "y"], name="key"), name="data"), + ), + ], +) +def test_nunique_with_NaT(key, data, dropna, expected): + # GH 27951 + df = pd.DataFrame({"key": key, "data": data}) + result = df.groupby(["key"])["data"].nunique(dropna=dropna) + tm.assert_series_equal(result, expected) + + def test_nunique_preserves_column_level_names(): # GH 23222 test = pd.DataFrame([1, 2, 2], columns=pd.Index(["A"], name="level_0"))
- [x] Closes #27904 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27951
2019-08-16T16:52:28Z
2019-09-07T11:29:41Z
2019-09-07T11:29:40Z
2019-09-07T12:17:38Z
TST: parametrize and de-duplicate arith tests
diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py index f047154f2c636..774ff14398bdb 100644 --- a/pandas/tests/arithmetic/conftest.py +++ b/pandas/tests/arithmetic/conftest.py @@ -190,7 +190,12 @@ def box(request): @pytest.fixture( - params=[pd.Index, pd.Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail)], + params=[ + pd.Index, + pd.Series, + pytest.param(pd.DataFrame, marks=pytest.mark.xfail), + tm.to_array, + ], ids=id_func, ) def box_df_fail(request): @@ -206,6 +211,7 @@ def box_df_fail(request): (pd.Series, False), (pd.DataFrame, False), pytest.param((pd.DataFrame, True), marks=pytest.mark.xfail), + (tm.to_array, False), ], ids=id_func, ) diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 5931cd93cc8c5..bc7b979d2c7d0 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -348,28 +348,6 @@ def test_dt64arr_timestamp_equality(self, box_with_array): expected = tm.box_expected([False, False], xbox) tm.assert_equal(result, expected) - @pytest.mark.parametrize( - "op", - [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le], - ) - def test_comparison_tzawareness_compat(self, op): - # GH#18162 - dr = pd.date_range("2016-01-01", periods=6) - dz = dr.tz_localize("US/Pacific") - - # Check that there isn't a problem aware-aware and naive-naive do not - # raise - naive_series = Series(dr) - aware_series = Series(dz) - msg = "Cannot compare tz-naive and tz-aware" - with pytest.raises(TypeError, match=msg): - op(dz, naive_series) - with pytest.raises(TypeError, match=msg): - op(dr, aware_series) - - # TODO: implement _assert_tzawareness_compat for the reverse - # comparison with the Series on the left-hand side - class TestDatetimeIndexComparisons: @@ -599,15 +577,18 @@ def test_comparison_tzawareness_compat(self, op, box_df_fail): with pytest.raises(TypeError, match=msg): op(dz, 
np.array(list(dr), dtype=object)) - # Check that there isn't a problem aware-aware and naive-naive do not - # raise + # The aware==aware and naive==naive comparisons should *not* raise assert_all(dr == dr) - assert_all(dz == dz) + assert_all(dr == list(dr)) + assert_all(list(dr) == dr) + assert_all(np.array(list(dr), dtype=object) == dr) + assert_all(dr == np.array(list(dr), dtype=object)) - # FIXME: DataFrame case fails to raise for == and !=, wrong - # message for inequalities - assert (dr == list(dr)).all() - assert (dz == list(dz)).all() + assert_all(dz == dz) + assert_all(dz == list(dz)) + assert_all(list(dz) == dz) + assert_all(np.array(list(dz), dtype=object) == dz) + assert_all(dz == np.array(list(dz), dtype=object)) @pytest.mark.parametrize( "op", @@ -844,6 +825,7 @@ def test_dt64arr_isub_timedeltalike_scalar( rng -= two_hours tm.assert_equal(rng, expected) + # TODO: redundant with test_dt64arr_add_timedeltalike_scalar def test_dt64arr_add_td64_scalar(self, box_with_array): # scalar timedeltas/np.timedelta64 objects # operate with np.timedelta64 correctly @@ -1709,14 +1691,12 @@ def test_operators_datetimelike(self): dt1 - dt2 dt2 - dt1 - # ## datetime64 with timetimedelta ### + # datetime64 with timetimedelta dt1 + td1 td1 + dt1 dt1 - td1 - # TODO: Decide if this ought to work. 
- # td1 - dt1 - # ## timetimedelta with datetime64 ### + # timetimedelta with datetime64 td1 + dt1 dt1 + td1 @@ -1914,7 +1894,7 @@ def test_dt64_series_add_intlike(self, tz, op): with pytest.raises(TypeError, match=msg): method(other) with pytest.raises(TypeError, match=msg): - method(other.values) + method(np.array(other)) with pytest.raises(TypeError, match=msg): method(pd.Index(other)) @@ -2380,34 +2360,34 @@ def test_ufunc_coercions(self): idx = date_range("2011-01-01", periods=3, freq="2D", name="x") delta = np.timedelta64(1, "D") + exp = date_range("2011-01-02", periods=3, freq="2D", name="x") for result in [idx + delta, np.add(idx, delta)]: assert isinstance(result, DatetimeIndex) - exp = date_range("2011-01-02", periods=3, freq="2D", name="x") tm.assert_index_equal(result, exp) assert result.freq == "2D" + exp = date_range("2010-12-31", periods=3, freq="2D", name="x") for result in [idx - delta, np.subtract(idx, delta)]: assert isinstance(result, DatetimeIndex) - exp = date_range("2010-12-31", periods=3, freq="2D", name="x") tm.assert_index_equal(result, exp) assert result.freq == "2D" delta = np.array( [np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")] ) + exp = DatetimeIndex( + ["2011-01-02", "2011-01-05", "2011-01-08"], freq="3D", name="x" + ) for result in [idx + delta, np.add(idx, delta)]: assert isinstance(result, DatetimeIndex) - exp = DatetimeIndex( - ["2011-01-02", "2011-01-05", "2011-01-08"], freq="3D", name="x" - ) tm.assert_index_equal(result, exp) assert result.freq == "3D" + exp = DatetimeIndex( + ["2010-12-31", "2011-01-01", "2011-01-02"], freq="D", name="x" + ) for result in [idx - delta, np.subtract(idx, delta)]: assert isinstance(result, DatetimeIndex) - exp = DatetimeIndex( - ["2010-12-31", "2011-01-01", "2011-01-02"], freq="D", name="x" - ) tm.assert_index_equal(result, exp) assert result.freq == "D" diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 
2b23790e4ccd3..06ef1a30532ee 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -561,9 +561,9 @@ def test_div_int(self, numeric_idx): tm.assert_index_equal(result, expected) @pytest.mark.parametrize("op", [operator.mul, ops.rmul, operator.floordiv]) - def test_mul_int_identity(self, op, numeric_idx, box): + def test_mul_int_identity(self, op, numeric_idx, box_with_array): idx = numeric_idx - idx = tm.box_expected(idx, box) + idx = tm.box_expected(idx, box_with_array) result = op(idx, 1) tm.assert_equal(result, idx) @@ -615,8 +615,9 @@ def test_mul_size_mismatch_raises(self, numeric_idx): idx * np.array([1, 2]) @pytest.mark.parametrize("op", [operator.pow, ops.rpow]) - def test_pow_float(self, op, numeric_idx, box): + def test_pow_float(self, op, numeric_idx, box_with_array): # test power calculations both ways, GH#14973 + box = box_with_array idx = numeric_idx expected = pd.Float64Index(op(idx.values, 2.0)) @@ -626,8 +627,9 @@ def test_pow_float(self, op, numeric_idx, box): result = op(idx, 2.0) tm.assert_equal(result, expected) - def test_modulo(self, numeric_idx, box): + def test_modulo(self, numeric_idx, box_with_array): # GH#9244 + box = box_with_array idx = numeric_idx expected = Index(idx.values % 2) @@ -1041,7 +1043,8 @@ class TestObjectDtypeEquivalence: # Tests that arithmetic operations match operations executed elementwise @pytest.mark.parametrize("dtype", [None, object]) - def test_numarr_with_dtype_add_nan(self, dtype, box): + def test_numarr_with_dtype_add_nan(self, dtype, box_with_array): + box = box_with_array ser = pd.Series([1, 2, 3], dtype=dtype) expected = pd.Series([np.nan, np.nan, np.nan], dtype=dtype) @@ -1055,7 +1058,8 @@ def test_numarr_with_dtype_add_nan(self, dtype, box): tm.assert_equal(result, expected) @pytest.mark.parametrize("dtype", [None, object]) - def test_numarr_with_dtype_add_int(self, dtype, box): + def test_numarr_with_dtype_add_int(self, dtype, box_with_array): + box = 
box_with_array ser = pd.Series([1, 2, 3], dtype=dtype) expected = pd.Series([2, 3, 4], dtype=dtype) diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py index fd9db80671360..f9c1de115b3a4 100644 --- a/pandas/tests/arithmetic/test_object.py +++ b/pandas/tests/arithmetic/test_object.py @@ -89,7 +89,7 @@ def test_pow_ops_object(self): @pytest.mark.parametrize("op", [operator.add, ops.radd]) @pytest.mark.parametrize("other", ["category", "Int64"]) - def test_add_extension_scalar(self, other, box, op): + def test_add_extension_scalar(self, other, box_with_array, op): # GH#22378 # Check that scalars satisfying is_extension_array_dtype(obj) # do not incorrectly try to dispatch to an ExtensionArray operation @@ -97,8 +97,8 @@ def test_add_extension_scalar(self, other, box, op): arr = pd.Series(["a", "b", "c"]) expected = pd.Series([op(x, other) for x in arr]) - arr = tm.box_expected(arr, box) - expected = tm.box_expected(expected, box) + arr = tm.box_expected(arr, box_with_array) + expected = tm.box_expected(expected, box_with_array) result = op(arr, other) tm.assert_equal(result, expected) @@ -133,16 +133,17 @@ def test_objarr_radd_str(self, box): ], ) @pytest.mark.parametrize("dtype", [None, object]) - def test_objarr_radd_str_invalid(self, dtype, data, box): + def test_objarr_radd_str_invalid(self, dtype, data, box_with_array): ser = Series(data, dtype=dtype) - ser = tm.box_expected(ser, box) + ser = tm.box_expected(ser, box_with_array) with pytest.raises(TypeError): "foo_" + ser @pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub]) - def test_objarr_add_invalid(self, op, box): + def test_objarr_add_invalid(self, op, box_with_array): # invalid ops + box = box_with_array obj_ser = tm.makeObjectSeries() obj_ser.name = "objects" diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 33a5d45df3885..d28f05b4ab247 100644 --- 
a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -968,71 +968,37 @@ def test_td64arr_add_datetime64_nat(self, box_with_array): # ------------------------------------------------------------------ # Operations with int-like others - def test_td64arr_add_int_series_invalid(self, box): - tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") - tdser = tm.box_expected(tdser, box) - err = TypeError if box is not pd.Index else NullFrequencyError - int_ser = Series([2, 3, 4]) - - with pytest.raises(err): - tdser + int_ser - with pytest.raises(err): - int_ser + tdser - with pytest.raises(err): - tdser - int_ser - with pytest.raises(err): - int_ser - tdser - - def test_td64arr_add_intlike(self, box_with_array): - # GH#19123 - tdi = TimedeltaIndex(["59 days", "59 days", "NaT"]) - ser = tm.box_expected(tdi, box_with_array) - - err = TypeError - if box_with_array in [pd.Index, tm.to_array]: - err = NullFrequencyError - - other = Series([20, 30, 40], dtype="uint8") - - # TODO: separate/parametrize - with pytest.raises(err): - ser + 1 - with pytest.raises(err): - ser - 1 - - with pytest.raises(err): - ser + other - with pytest.raises(err): - ser - other - - with pytest.raises(err): - ser + np.array(other) - with pytest.raises(err): - ser - np.array(other) - - with pytest.raises(err): - ser + pd.Index(other) - with pytest.raises(err): - ser - pd.Index(other) - - @pytest.mark.parametrize("scalar", [1, 1.5, np.array(2)]) - def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array, scalar): + @pytest.mark.parametrize( + "other", + [ + # GH#19123 + 1, + Series([20, 30, 40], dtype="uint8"), + np.array([20, 30, 40], dtype="uint8"), + pd.UInt64Index([20, 30, 40]), + pd.Int64Index([20, 30, 40]), + Series([2, 3, 4]), + 1.5, + np.array(2), + ], + ) + def test_td64arr_addsub_numeric_invalid(self, box_with_array, other): box = box_with_array - tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") tdser = 
tm.box_expected(tdser, box) + err = TypeError - if box in [pd.Index, tm.to_array] and not isinstance(scalar, float): + if box in [pd.Index, tm.to_array] and not isinstance(other, float): err = NullFrequencyError with pytest.raises(err): - tdser + scalar + tdser + other with pytest.raises(err): - scalar + tdser + other + tdser with pytest.raises(err): - tdser - scalar + tdser - other with pytest.raises(err): - scalar - tdser + other - tdser @pytest.mark.parametrize( "dtype", @@ -1059,11 +1025,12 @@ def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array, scalar): ], ids=lambda x: type(x).__name__, ) - def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype): + def test_td64arr_add_sub_numeric_arr_invalid(self, box_with_array, vec, dtype): + box = box_with_array tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") tdser = tm.box_expected(tdser, box) err = TypeError - if box is pd.Index and not dtype.startswith("float"): + if box in [pd.Index, tm.to_array] and not dtype.startswith("float"): err = NullFrequencyError vector = vec.astype(dtype) @@ -1080,14 +1047,6 @@ def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype): # Operations with timedelta-like others # TODO: this was taken from tests.series.test_ops; de-duplicate - @pytest.mark.parametrize( - "scalar_td", - [ - timedelta(minutes=5, seconds=4), - Timedelta(minutes=5, seconds=4), - Timedelta("5m4s").to_timedelta64(), - ], - ) def test_operators_timedelta64_with_timedelta(self, scalar_td): # smoke tests td1 = Series([timedelta(minutes=5, seconds=3)] * 3) @@ -1141,7 +1100,8 @@ def test_timedelta64_operations_with_timedeltas(self): # roundtrip tm.assert_series_equal(result + td2, td1) - def test_td64arr_add_td64_array(self, box): + def test_td64arr_add_td64_array(self, box_with_array): + box = box_with_array dti = pd.date_range("2016-01-01", periods=3) tdi = dti - dti.shift(1) tdarr = tdi.values @@ -1155,7 +1115,8 @@ def test_td64arr_add_td64_array(self, box): result 
= tdarr + tdi tm.assert_equal(result, expected) - def test_td64arr_sub_td64_array(self, box): + def test_td64arr_sub_td64_array(self, box_with_array): + box = box_with_array dti = pd.date_range("2016-01-01", periods=3) tdi = dti - dti.shift(1) tdarr = tdi.values @@ -1229,8 +1190,9 @@ def test_td64arr_add_sub_tdi(self, box, names): else: assert result.dtypes[0] == "timedelta64[ns]" - def test_td64arr_add_sub_td64_nat(self, box): + def test_td64arr_add_sub_td64_nat(self, box_with_array): # GH#23320 special handling for timedelta64("NaT") + box = box_with_array tdi = pd.TimedeltaIndex([NaT, Timedelta("1s")]) other = np.timedelta64("NaT") expected = pd.TimedeltaIndex(["NaT"] * 2) @@ -1247,8 +1209,9 @@ def test_td64arr_add_sub_td64_nat(self, box): result = other - obj tm.assert_equal(result, expected) - def test_td64arr_sub_NaT(self, box): + def test_td64arr_sub_NaT(self, box_with_array): # GH#18808 + box = box_with_array ser = Series([NaT, Timedelta("1s")]) expected = Series([NaT, NaT], dtype="timedelta64[ns]") @@ -1258,8 +1221,9 @@ def test_td64arr_sub_NaT(self, box): res = ser - pd.NaT tm.assert_equal(res, expected) - def test_td64arr_add_timedeltalike(self, two_hours, box): + def test_td64arr_add_timedeltalike(self, two_hours, box_with_array): # only test adding/sub offsets as + is now numeric + box = box_with_array rng = timedelta_range("1 days", "10 days") expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D") rng = tm.box_expected(rng, box) @@ -1268,8 +1232,9 @@ def test_td64arr_add_timedeltalike(self, two_hours, box): result = rng + two_hours tm.assert_equal(result, expected) - def test_td64arr_sub_timedeltalike(self, two_hours, box): + def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array): # only test adding/sub offsets as - is now numeric + box = box_with_array rng = timedelta_range("1 days", "10 days") expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00") @@ -1352,8 +1317,9 @@ def 
test_td64arr_add_offset_index(self, names, box): # TODO: combine with test_td64arr_add_offset_index by parametrizing # over second box? - def test_td64arr_add_offset_array(self, box): + def test_td64arr_add_offset_array(self, box_with_array): # GH#18849 + box = box_with_array tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"]) other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) @@ -1429,13 +1395,12 @@ def test_td64arr_with_offset_series(self, names, box_df_fail): # GH#18849 box = box_df_fail box2 = Series if box in [pd.Index, tm.to_array] else box + exname = names[2] if box is not tm.to_array else names[1] tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0]) other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1]) - expected_add = Series( - [tdi[n] + other[n] for n in range(len(tdi))], name=names[2] - ) + expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))], name=exname) tdi = tm.box_expected(tdi, box) expected_add = tm.box_expected(expected_add, box2) @@ -1448,9 +1413,7 @@ def test_td64arr_with_offset_series(self, names, box_df_fail): tm.assert_equal(res2, expected_add) # TODO: separate/parametrize add/sub test? 
- expected_sub = Series( - [tdi[n] - other[n] for n in range(len(tdi))], name=names[2] - ) + expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))], name=exname) expected_sub = tm.box_expected(expected_sub, box2) with tm.assert_produces_warning(PerformanceWarning): @@ -2051,6 +2014,8 @@ def test_td64arr_div_numeric_array(self, box_with_array, vector, dtype): def test_td64arr_mul_int_series(self, box_df_fail, names): # GH#19042 test for correct name attachment box = box_df_fail # broadcasts along wrong axis, but doesn't raise + exname = names[2] if box is not tm.to_array else names[1] + tdi = TimedeltaIndex( ["0days", "1day", "2days", "3days", "4days"], name=names[0] ) @@ -2060,11 +2025,11 @@ def test_td64arr_mul_int_series(self, box_df_fail, names): expected = Series( ["0days", "1day", "4days", "9days", "16days"], dtype="timedelta64[ns]", - name=names[2], + name=exname, ) tdi = tm.box_expected(tdi, box) - box = Series if (box is pd.Index and type(ser) is Series) else box + box = Series if (box is pd.Index or box is tm.to_array) else box expected = tm.box_expected(expected, box) result = ser * tdi @@ -2115,7 +2080,11 @@ def test_float_series_rdiv_td64arr(self, box_with_array, names): tm.assert_equal(result, expected) -class TestTimedeltaArraylikeInvalidArithmeticOps: +class TestTimedelta64ArrayLikeArithmetic: + # Arithmetic tests for timedelta64[ns] vectors fully parametrized over + # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all arithmetic + # tests will eventually end up here. + def test_td64arr_pow_invalid(self, scalar_td, box_with_array): td1 = Series([timedelta(minutes=5, seconds=3)] * 3) td1.iloc[2] = np.nan
Hopefully before long we can get rid of `box` fixture altogether, then rename `box_with_array` to `box`
https://api.github.com/repos/pandas-dev/pandas/pulls/27950
2019-08-16T16:25:21Z
2019-09-02T21:31:29Z
2019-09-02T21:31:29Z
2019-09-02T22:53:49Z
API: Add string extension type
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index e13738b98833a..45be38b40b658 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -266,6 +266,10 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then -k"-from_arrays -from_breaks -from_intervals -from_tuples -set_closed -to_tuples -interval_range" RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Doctests arrays/string_.py' ; echo $MSG + pytest -q --doctest-modules pandas/core/arrays/string_.py + RET=$(($RET + $?)) ; echo $MSG "DONE" + fi ### DOCSTRINGS ### diff --git a/doc/source/getting_started/basics.rst b/doc/source/getting_started/basics.rst index 802ffadf2a81e..36a7166f350e5 100644 --- a/doc/source/getting_started/basics.rst +++ b/doc/source/getting_started/basics.rst @@ -986,7 +986,7 @@ not noted for a particular column will be ``NaN``: tsdf.agg({'A': ['mean', 'min'], 'B': 'sum'}) -.. _basics.aggregation.mixed_dtypes: +.. _basics.aggregation.mixed_string: Mixed dtypes ++++++++++++ @@ -1704,7 +1704,8 @@ built-in string methods. For example: .. ipython:: python - s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) + s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'], + dtype="string") s.str.lower() Powerful pattern-matching methods are provided as well, but note that @@ -1712,6 +1713,12 @@ pattern-matching generally uses `regular expressions <https://docs.python.org/3/library/re.html>`__ by default (and in some cases always uses them). +.. note:: + + Prior to pandas 1.0, string methods were only available on ``object`` -dtype + ``Series``. Pandas 1.0 added the :class:`StringDtype` which is dedicated + to strings. See :ref:`text.types` for more. + Please see :ref:`Vectorized String Methods <text.string_methods>` for a complete description. @@ -1925,9 +1932,15 @@ period (time spans) :class:`PeriodDtype` :class:`Period` :class:`arrays. 
sparse :class:`SparseDtype` (none) :class:`arrays.SparseArray` :ref:`sparse` intervals :class:`IntervalDtype` :class:`Interval` :class:`arrays.IntervalArray` :ref:`advanced.intervalindex` nullable integer :class:`Int64Dtype`, ... (none) :class:`arrays.IntegerArray` :ref:`integer_na` +Strings :class:`StringDtype` :class:`str` :class:`arrays.StringArray` :ref:`text` =================== ========================= ================== ============================= ============================= -Pandas uses the ``object`` dtype for storing strings. +Pandas has two ways to store strings. + +1. ``object`` dtype, which can hold any Python object, including strings. +2. :class:`StringDtype`, which is dedicated to strings. + +Generally, we recommend using :class:`StringDtype`. See :ref:`text.types` fore more. Finally, arbitrary objects may be stored using the ``object`` dtype, but should be avoided to the extent possible (for performance and interoperability with diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst index 7f464bf952bfb..0c435e06ac57f 100644 --- a/doc/source/reference/arrays.rst +++ b/doc/source/reference/arrays.rst @@ -24,6 +24,7 @@ Intervals :class:`IntervalDtype` :class:`Interval` :ref:`api.array Nullable Integer :class:`Int64Dtype`, ... (none) :ref:`api.arrays.integer_na` Categorical :class:`CategoricalDtype` (none) :ref:`api.arrays.categorical` Sparse :class:`SparseDtype` (none) :ref:`api.arrays.sparse` +Strings :class:`StringDtype` :class:`str` :ref:`api.arrays.string` =================== ========================= ================== ============================= Pandas and third-party libraries can extend NumPy's type system (see :ref:`extending.extension-types`). @@ -460,6 +461,29 @@ and methods if the :class:`Series` contains sparse values. See :ref:`api.series.sparse` for more. +.. 
_api.arrays.string: + +Text data +--------- + +When working with text data, where each valid element is a string or missing, +we recommend using :class:`StringDtype` (with the alias ``"string"``). + +.. autosummary:: + :toctree: api/ + :template: autosummary/class_without_autosummary.rst + + arrays.StringArray + +.. autosummary:: + :toctree: api/ + :template: autosummary/class_without_autosummary.rst + + StringDtype + +The ``Series.str`` accessor is available for ``Series`` backed by a :class:`arrays.StringArray`. +See :ref:`api.series.str` for more. + .. Dtype attributes which are manually listed in their docstrings: including .. it here to make sure a docstring page is built for them @@ -471,4 +495,4 @@ and methods if the :class:`Series` contains sparse values. See DatetimeTZDtype.unit DatetimeTZDtype.tz PeriodDtype.freq - IntervalDtype.subtype \ No newline at end of file + IntervalDtype.subtype diff --git a/doc/source/user_guide/text.rst b/doc/source/user_guide/text.rst index acb5810e5252a..789ff2a65355b 100644 --- a/doc/source/user_guide/text.rst +++ b/doc/source/user_guide/text.rst @@ -6,8 +6,71 @@ Working with text data ====================== +.. _text.types: + +Text Data Types +--------------- + +.. versionadded:: 1.0.0 + +There are two main ways to store text data + +1. ``object`` -dtype NumPy array. +2. :class:`StringDtype` extension type. + +We recommend using :class:`StringDtype` to store text data. + +Prior to pandas 1.0, ``object`` dtype was the only option. This was unfortunate +for many reasons: + +1. You can accidentally store a *mixture* of strings and non-strings in an + ``object`` dtype array. It's better to have a dedicated dtype. +2. ``object`` dtype breaks dtype-specific operations like :meth:`DataFrame.select_dtypes`. + There isn't a clear way to select *just* text while excluding non-text + but still object-dtype columns. +3. When reading code, the contents of an ``object`` dtype array is less clear + than ``'string'``. 
+ +Currently, the performance of ``object`` dtype arrays of strings and +:class:`arrays.StringArray` are about the same. We expect future enhancements +to significantly increase the performance and lower the memory overhead of +:class:`~arrays.StringArray`. + +.. warning:: + + ``StringArray`` is currently considered experimental. The implementation + and parts of the API may change without warning. + +For backwards-compatibility, ``object`` dtype remains the default type we +infer a list of strings to + +.. ipython:: python + + pd.Series(['a', 'b', 'c']) + +To explicitly request ``string`` dtype, specify the ``dtype`` + +.. ipython:: python + + pd.Series(['a', 'b', 'c'], dtype="string") + pd.Series(['a', 'b', 'c'], dtype=pd.StringDtype()) + +Or ``astype`` after the ``Series`` or ``DataFrame`` is created + +.. ipython:: python + + s = pd.Series(['a', 'b', 'c']) + s + s.astype("string") + +Everything that follows in the rest of this document applies equally to +``string`` and ``object`` dtype. + .. _text.string_methods: +String Methods +-------------- + Series and Index are equipped with a set of string processing methods that make it easy to operate on each element of the array. Perhaps most importantly, these methods exclude missing/NA values automatically. These are @@ -16,7 +79,8 @@ the equivalent (scalar) built-in string methods: .. ipython:: python - s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) + s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'], + dtype="string") s.str.lower() s.str.upper() s.str.len() @@ -90,7 +154,7 @@ Methods like ``split`` return a Series of lists: .. ipython:: python - s2 = pd.Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h']) + s2 = pd.Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'], dtype="string") s2.str.split('_') Elements in the split lists can be accessed using ``get`` or ``[]`` notation: @@ -106,6 +170,9 @@ It is easy to expand this to return a DataFrame using ``expand``. 
s2.str.split('_', expand=True) +When original ``Series`` has :class:`StringDtype`, the output columns will all +be :class:`StringDtype` as well. + It is also possible to limit the number of splits: .. ipython:: python @@ -125,7 +192,8 @@ i.e., from the end of the string to the beginning of the string: .. ipython:: python s3 = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', - '', np.nan, 'CABA', 'dog', 'cat']) + '', np.nan, 'CABA', 'dog', 'cat'], + dtype="string") s3 s3.str.replace('^.a|dog', 'XX-XX ', case=False) @@ -136,7 +204,7 @@ following code will cause trouble because of the regular expression meaning of .. ipython:: python # Consider the following badly formatted financial data - dollars = pd.Series(['12', '-$10', '$10,000']) + dollars = pd.Series(['12', '-$10', '$10,000'], dtype="string") # This does what you'd naively expect: dollars.str.replace('$', '') @@ -174,7 +242,8 @@ positional argument (a regex object) and return a string. def repl(m): return m.group(0)[::-1] - pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(pat, repl) + pd.Series(['foo 123', 'bar baz', np.nan], + dtype="string").str.replace(pat, repl) # Using regex groups pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)" @@ -182,7 +251,8 @@ positional argument (a regex object) and return a string. def repl(m): return m.group('two').swapcase() - pd.Series(['Foo Bar Baz', np.nan]).str.replace(pat, repl) + pd.Series(['Foo Bar Baz', np.nan], + dtype="string").str.replace(pat, repl) .. versionadded:: 0.20.0 @@ -221,7 +291,7 @@ The content of a ``Series`` (or ``Index``) can be concatenated: .. ipython:: python - s = pd.Series(['a', 'b', 'c', 'd']) + s = pd.Series(['a', 'b', 'c', 'd'], dtype="string") s.str.cat(sep=',') If not specified, the keyword ``sep`` for the separator defaults to the empty string, ``sep=''``: @@ -234,7 +304,7 @@ By default, missing values are ignored. Using ``na_rep``, they can be given a re .. 
ipython:: python - t = pd.Series(['a', 'b', np.nan, 'd']) + t = pd.Series(['a', 'b', np.nan, 'd'], dtype="string") t.str.cat(sep=',') t.str.cat(sep=',', na_rep='-') @@ -279,7 +349,8 @@ the ``join``-keyword. .. ipython:: python :okwarning: - u = pd.Series(['b', 'd', 'a', 'c'], index=[1, 3, 0, 2]) + u = pd.Series(['b', 'd', 'a', 'c'], index=[1, 3, 0, 2], + dtype="string") s u s.str.cat(u) @@ -295,7 +366,8 @@ In particular, alignment also means that the different lengths do not need to co .. ipython:: python - v = pd.Series(['z', 'a', 'b', 'd', 'e'], index=[-1, 0, 1, 3, 4]) + v = pd.Series(['z', 'a', 'b', 'd', 'e'], index=[-1, 0, 1, 3, 4], + dtype="string") s v s.str.cat(v, join='left', na_rep='-') @@ -351,7 +423,8 @@ of the string, the result will be a ``NaN``. .. ipython:: python s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, - 'CABA', 'dog', 'cat']) + 'CABA', 'dog', 'cat'], + dtype="string") s.str[0] s.str[1] @@ -382,7 +455,8 @@ DataFrame with one column per group. .. ipython:: python - pd.Series(['a1', 'b2', 'c3']).str.extract(r'([ab])(\d)', expand=False) + pd.Series(['a1', 'b2', 'c3'], + dtype="string").str.extract(r'([ab])(\d)', expand=False) Elements that do not match return a row filled with ``NaN``. Thus, a Series of messy strings can be "converted" into a like-indexed Series @@ -395,14 +469,16 @@ Named groups like .. ipython:: python - pd.Series(['a1', 'b2', 'c3']).str.extract(r'(?P<letter>[ab])(?P<digit>\d)', - expand=False) + pd.Series(['a1', 'b2', 'c3'], + dtype="string").str.extract(r'(?P<letter>[ab])(?P<digit>\d)', + expand=False) and optional groups like .. ipython:: python - pd.Series(['a1', 'b2', '3']).str.extract(r'([ab])?(\d)', expand=False) + pd.Series(['a1', 'b2', '3'], + dtype="string").str.extract(r'([ab])?(\d)', expand=False) can also be used. Note that any capture group names in the regular expression will be used for column names; otherwise capture group @@ -413,20 +489,23 @@ with one column if ``expand=True``. .. 
ipython:: python - pd.Series(['a1', 'b2', 'c3']).str.extract(r'[ab](\d)', expand=True) + pd.Series(['a1', 'b2', 'c3'], + dtype="string").str.extract(r'[ab](\d)', expand=True) It returns a Series if ``expand=False``. .. ipython:: python - pd.Series(['a1', 'b2', 'c3']).str.extract(r'[ab](\d)', expand=False) + pd.Series(['a1', 'b2', 'c3'], + dtype="string").str.extract(r'[ab](\d)', expand=False) Calling on an ``Index`` with a regex with exactly one capture group returns a ``DataFrame`` with one column if ``expand=True``. .. ipython:: python - s = pd.Series(["a1", "b2", "c3"], ["A11", "B22", "C33"]) + s = pd.Series(["a1", "b2", "c3"], ["A11", "B22", "C33"], + dtype="string") s s.index.str.extract("(?P<letter>[a-zA-Z])", expand=True) @@ -471,7 +550,8 @@ Unlike ``extract`` (which returns only the first match), .. ipython:: python - s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"]) + s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"], + dtype="string") s two_groups = '(?P<letter>[a-z])(?P<digit>[0-9])' s.str.extract(two_groups, expand=True) @@ -489,7 +569,7 @@ When each subject string in the Series has exactly one match, .. ipython:: python - s = pd.Series(['a3', 'b3', 'c2']) + s = pd.Series(['a3', 'b3', 'c2'], dtype="string") s then ``extractall(pat).xs(0, level='match')`` gives the same result as @@ -510,7 +590,7 @@ same result as a ``Series.str.extractall`` with a default index (starts from 0). pd.Index(["a1a2", "b1", "c1"]).str.extractall(two_groups) - pd.Series(["a1a2", "b1", "c1"]).str.extractall(two_groups) + pd.Series(["a1a2", "b1", "c1"], dtype="string").str.extractall(two_groups) Testing for Strings that match or contain a pattern @@ -521,13 +601,15 @@ You can check whether elements contain a pattern: .. ipython:: python pattern = r'[0-9][a-z]' - pd.Series(['1', '2', '3a', '3b', '03c']).str.contains(pattern) + pd.Series(['1', '2', '3a', '3b', '03c'], + dtype="string").str.contains(pattern) Or whether elements match a pattern: .. 
ipython:: python - pd.Series(['1', '2', '3a', '3b', '03c']).str.match(pattern) + pd.Series(['1', '2', '3a', '3b', '03c'], + dtype="string").str.match(pattern) The distinction between ``match`` and ``contains`` is strictness: ``match`` relies on strict ``re.match``, while ``contains`` relies on ``re.search``. @@ -537,7 +619,8 @@ an extra ``na`` argument so missing values can be considered True or False: .. ipython:: python - s4 = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) + s4 = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'], + dtype="string") s4.str.contains('A', na=False) .. _text.indicator: @@ -550,7 +633,7 @@ For example if they are separated by a ``'|'``: .. ipython:: python - s = pd.Series(['a', 'a|b', np.nan, 'a|c']) + s = pd.Series(['a', 'a|b', np.nan, 'a|c'], dtype="string") s.str.get_dummies(sep='|') String ``Index`` also supports ``get_dummies`` which returns a ``MultiIndex``. diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 16d23d675a8bb..3f7c1a8a5222e 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -50,14 +50,56 @@ including other versions of pandas. Enhancements ~~~~~~~~~~~~ -- :meth:`DataFrame.to_string` added the ``max_colwidth`` parameter to control when wide columns are truncated (:issue:`9784`) -- +.. _whatsnew_100.string: + +Dedicated string data type +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We've added :class:`StringDtype`, an extension type dedicated to string data. +Previously, strings were typically stored in object-dtype NumPy arrays. + +.. warning:: + + ``StringDtype`` and is currently considered experimental. The implementation + and parts of the API may change without warning. + +The text extension type solves several issues with object-dtype NumPy arrays: + +1. You can accidentally store a *mixture* of strings and non-strings in an + ``object`` dtype array. A ``StringArray`` can only store strings. +2. 
``object`` dtype breaks dtype-specific operations like :meth:`DataFrame.select_dtypes`. + There isn't a clear way to select *just* text while excluding non-text, + but still object-dtype columns. +3. When reading code, the contents of an ``object`` dtype array is less clear + than ``string``. + + +.. ipython:: python + + pd.Series(['abc', None, 'def'], dtype=pd.StringDtype()) + +You can use the alias ``"string"`` as well. + +.. ipython:: python + + s = pd.Series(['abc', None, 'def'], dtype="string") + s + +The usual string accessor methods work. Where appropriate, the return type +of the Series or columns of a DataFrame will also have string dtype. + + s.str.upper() + s.str.split('b', expand=True).dtypes + +We recommend explicitly using the ``string`` data type when working with strings. +See :ref:`text.types` for more. .. _whatsnew_1000.enhancements.other: Other enhancements ^^^^^^^^^^^^^^^^^^ +- :meth:`DataFrame.to_string` added the ``max_colwidth`` parameter to control when wide columns are truncated (:issue:`9784`) - :meth:`MultiIndex.from_product` infers level names from inputs if not explicitly provided (:issue:`27292`) - :meth:`DataFrame.to_latex` now accepts ``caption`` and ``label`` arguments (:issue:`25436`) - The :ref:`integer dtype <integer_na>` with support for missing values can now be converted to diff --git a/pandas/__init__.py b/pandas/__init__.py index 6d0c55a45ed46..5d163e411c0ac 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -66,6 +66,7 @@ PeriodDtype, IntervalDtype, DatetimeTZDtype, + StringDtype, # missing isna, isnull, diff --git a/pandas/arrays/__init__.py b/pandas/arrays/__init__.py index db01f2a0c674f..9870b5bed076d 100644 --- a/pandas/arrays/__init__.py +++ b/pandas/arrays/__init__.py @@ -11,6 +11,7 @@ PandasArray, PeriodArray, SparseArray, + StringArray, TimedeltaArray, ) @@ -22,5 +23,6 @@ "PandasArray", "PeriodArray", "SparseArray", + "StringArray", "TimedeltaArray", ] diff --git a/pandas/core/api.py b/pandas/core/api.py 
index bd2a57a15bdd2..04f2f84c92a15 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -10,6 +10,7 @@ ) from pandas.core.dtypes.missing import isna, isnull, notna, notnull +# TODO: Remove get_dummies import when statsmodels updates #18264 from pandas.core.algorithms import factorize, unique, value_counts from pandas.core.arrays import Categorical from pandas.core.arrays.integer import ( @@ -22,12 +23,9 @@ UInt32Dtype, UInt64Dtype, ) +from pandas.core.arrays.string_ import StringDtype from pandas.core.construction import array - from pandas.core.groupby import Grouper, NamedAgg - -# DataFrame needs to be imported after NamedAgg to avoid a circular import -from pandas.core.frame import DataFrame # isort:skip from pandas.core.index import ( CategoricalIndex, DatetimeIndex, @@ -47,9 +45,7 @@ from pandas.core.indexes.period import Period, period_range from pandas.core.indexes.timedeltas import Timedelta, timedelta_range from pandas.core.indexing import IndexSlice -from pandas.core.reshape.reshape import ( - get_dummies, -) # TODO: Remove get_dummies import when statsmodels updates #18264 +from pandas.core.reshape.reshape import get_dummies from pandas.core.series import Series from pandas.core.tools.datetimes import to_datetime from pandas.core.tools.numeric import to_numeric @@ -57,3 +53,6 @@ from pandas.io.formats.format import set_eng_float_format from pandas.tseries.offsets import DateOffset + +# DataFrame needs to be imported after NamedAgg to avoid a circular import +from pandas.core.frame import DataFrame # isort:skip diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py index 5c83ed8cf5e24..868118bac6a7b 100644 --- a/pandas/core/arrays/__init__.py +++ b/pandas/core/arrays/__init__.py @@ -10,4 +10,5 @@ from .numpy_ import PandasArray, PandasDtype # noqa: F401 from .period import PeriodArray, period_array # noqa: F401 from .sparse import SparseArray # noqa: F401 +from .string_ import StringArray # noqa: F401 from .timedeltas import 
TimedeltaArray # noqa: F401 diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 32da0199e28f8..bf7404e8997c6 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -10,7 +10,7 @@ from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries -from pandas.core.dtypes.inference import is_array_like, is_list_like +from pandas.core.dtypes.inference import is_array_like from pandas.core.dtypes.missing import isna from pandas import compat @@ -229,13 +229,15 @@ def __getitem__(self, item): def __setitem__(self, key, value): value = extract_array(value, extract_numpy=True) - if not lib.is_scalar(key) and is_list_like(key): + scalar_key = lib.is_scalar(key) + scalar_value = lib.is_scalar(value) + + if not scalar_key and scalar_value: key = np.asarray(key) - if not lib.is_scalar(value): - value = np.asarray(value) + if not scalar_value: + value = np.asarray(value, dtype=self._ndarray.dtype) - value = np.asarray(value, dtype=self._ndarray.dtype) self._ndarray[key] = value def __len__(self) -> int: diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py new file mode 100644 index 0000000000000..87649ac651127 --- /dev/null +++ b/pandas/core/arrays/string_.py @@ -0,0 +1,281 @@ +import operator +from typing import TYPE_CHECKING, Type + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.dtypes import register_extension_dtype +from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries +from pandas.core.dtypes.inference import is_array_like + +from pandas import compat +from pandas.core import ops +from pandas.core.arrays import PandasArray +from pandas.core.construction import extract_array +from pandas.core.missing import isna + +if TYPE_CHECKING: + from pandas._typing import Scalar + + 
+@register_extension_dtype +class StringDtype(ExtensionDtype): + """ + Extension dtype for string data. + + .. versionadded:: 1.0.0 + + .. warning:: + + StringDtype is considered experimental. The implementation and + parts of the API may change without warning. + + In particular, StringDtype.na_value may change to no longer be + ``numpy.nan``. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> pd.StringDtype() + StringDtype + """ + + @property + def na_value(self) -> "Scalar": + """ + StringDtype uses :attr:`numpy.nan` as the missing NA value. + + .. warning:: + + `na_value` may change in a future release. + """ + return np.nan + + @property + def type(self) -> Type: + return str + + @property + def name(self) -> str: + """ + The alias for StringDtype is ``'string'``. + """ + return "string" + + @classmethod + def construct_from_string(cls, string: str) -> ExtensionDtype: + if string == "string": + return cls() + return super().construct_from_string(string) + + @classmethod + def construct_array_type(cls) -> "Type[StringArray]": + return StringArray + + def __repr__(self) -> str: + return "StringDtype" + + +class StringArray(PandasArray): + """ + Extension array for string data. + + .. versionadded:: 1.0.0 + + .. warning:: + + StringArray is considered experimental. The implementation and + parts of the API may change without warning. + + In particular, the NA value used may change to no longer be + ``numpy.nan``. + + Parameters + ---------- + values : array-like + The array of data. + + .. warning:: + + Currently, this expects an object-dtype ndarray + where the elements are Python strings. This may + change without warning in the future. + copy : bool, default False + Whether to copy the array of data. + + Attributes + ---------- + None + + Methods + ------- + None + + See Also + -------- + Series.str + The string methods are available on Series backed by + a StringArray. 
+ + Examples + -------- + >>> pd.array(['This is', 'some text', None, 'data.'], dtype="string") + <StringArray> + ['This is', 'some text', nan, 'data.'] + Length: 4, dtype: string + + Unlike ``object`` dtype arrays, ``StringArray`` doesn't allow non-string + values. + + >>> pd.array(['1', 1], dtype="string") + Traceback (most recent call last): + ... + ValueError: StringArray requires an object-dtype ndarray of strings. + """ + + # undo the PandasArray hack + _typ = "extension" + + def __init__(self, values, copy=False): + values = extract_array(values) + skip_validation = isinstance(values, type(self)) + + super().__init__(values, copy=copy) + self._dtype = StringDtype() + if not skip_validation: + self._validate() + + def _validate(self): + """Validate that we only store NA or strings.""" + if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True): + raise ValueError( + "StringArray requires a sequence of strings or missing values." + ) + if self._ndarray.dtype != "object": + raise ValueError( + "StringArray requires a sequence of strings. Got " + "'{}' dtype instead.".format(self._ndarray.dtype) + ) + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + if dtype: + assert dtype == "string" + result = super()._from_sequence(scalars, dtype=object, copy=copy) + # convert None to np.nan + # TODO: it would be nice to do this in _validate / lib.is_string_array + # We are already doing a scan over the values there. 
+ result[result.isna()] = np.nan + return result + + @classmethod + def _from_sequence_of_strings(cls, strings, dtype=None, copy=False): + return cls._from_sequence(strings, dtype=dtype, copy=copy) + + def __setitem__(self, key, value): + value = extract_array(value, extract_numpy=True) + if isinstance(value, type(self)): + # extract_array doesn't extract PandasArray subclasses + value = value._ndarray + + scalar_key = lib.is_scalar(key) + scalar_value = lib.is_scalar(value) + if scalar_key and not scalar_value: + raise ValueError("setting an array element with a sequence.") + + # validate new items + if scalar_value: + if scalar_value is None: + value = np.nan + elif not (isinstance(value, str) or np.isnan(value)): + raise ValueError( + "Cannot set non-string value '{}' into a StringArray.".format(value) + ) + else: + if not is_array_like(value): + value = np.asarray(value, dtype=object) + if len(value) and not lib.is_string_array(value, skipna=True): + raise ValueError("Must provide strings.") + + super().__setitem__(key, value) + + def fillna(self, value=None, method=None, limit=None): + # TODO: validate dtype + return super().fillna(value, method, limit) + + def astype(self, dtype, copy=True): + dtype = pandas_dtype(dtype) + if isinstance(dtype, StringDtype): + if copy: + return self.copy() + return self + return super().astype(dtype, copy) + + def _reduce(self, name, skipna=True, **kwargs): + raise TypeError("Cannot perform reduction '{}' with string dtype".format(name)) + + def value_counts(self, dropna=False): + from pandas import value_counts + + return value_counts(self._ndarray, dropna=dropna) + + # Overrride parent because we have different return types. 
+ @classmethod + def _create_arithmetic_method(cls, op): + def method(self, other): + if isinstance(other, (ABCIndexClass, ABCSeries, ABCDataFrame)): + return NotImplemented + + elif isinstance(other, cls): + other = other._ndarray + + mask = isna(self) | isna(other) + valid = ~mask + + if not lib.is_scalar(other): + if len(other) != len(self): + # prevent improper broadcasting when other is 2D + raise ValueError( + "Lengths of operands do not match: {} != {}".format( + len(self), len(other) + ) + ) + + other = np.asarray(other) + other = other[valid] + + result = np.empty_like(self._ndarray, dtype="object") + result[mask] = np.nan + result[valid] = op(self._ndarray[valid], other) + + if op.__name__ in {"add", "radd", "mul", "rmul"}: + return StringArray(result) + else: + dtype = "object" if mask.any() else "bool" + return np.asarray(result, dtype=dtype) + + return compat.set_function_name(method, "__{}__".format(op.__name__), cls) + + @classmethod + def _add_arithmetic_ops(cls): + cls.__add__ = cls._create_arithmetic_method(operator.add) + cls.__radd__ = cls._create_arithmetic_method(ops.radd) + + cls.__mul__ = cls._create_arithmetic_method(operator.mul) + cls.__rmul__ = cls._create_arithmetic_method(ops.rmul) + + _create_comparison_method = _create_arithmetic_method + + +StringArray._add_arithmetic_ops() +StringArray._add_comparison_ops() diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index cd87fbef02e4f..56bfbefdbf248 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -128,6 +128,7 @@ def isna(obj): def _isna_new(obj): + if is_scalar(obj): return libmissing.checknull(obj) # hack (for now) because MI registers as ndarray diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 25350119f9df5..888d2ae6f9473 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -763,6 +763,16 @@ def f(x): return f +def _result_dtype(arr): + # workaround #27953 + # ideally we just pass 
`dtype=arr.dtype` unconditionally, but this fails + # when the list of values is empty. + if arr.dtype.name == "string": + return "string" + else: + return object + + def _str_extract_noexpand(arr, pat, flags=0): """ Find groups in each string in the Series using passed regular @@ -817,11 +827,12 @@ def _str_extract_frame(arr, pat, flags=0): result_index = arr.index except AttributeError: result_index = None + dtype = _result_dtype(arr) return DataFrame( [groups_or_na(val) for val in arr], columns=columns, index=result_index, - dtype=object, + dtype=dtype, ) @@ -1019,8 +1030,11 @@ def str_extractall(arr, pat, flags=0): from pandas import MultiIndex index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"]) + dtype = _result_dtype(arr) - result = arr._constructor_expanddim(match_list, index=index, columns=columns) + result = arr._constructor_expanddim( + match_list, index=index, columns=columns, dtype=dtype + ) return result @@ -1073,7 +1087,7 @@ def str_get_dummies(arr, sep="|"): for i, t in enumerate(tags): pat = sep + t + sep - dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x) + dummies[:, i] = lib.map_infer(arr.to_numpy(), lambda x: pat in x) return dummies, tags @@ -1858,11 +1872,18 @@ def wrapper(self, *args, **kwargs): return _forbid_nonstring_types -def _noarg_wrapper(f, name=None, docstring=None, forbidden_types=["bytes"], **kargs): +def _noarg_wrapper( + f, + name=None, + docstring=None, + forbidden_types=["bytes"], + returns_string=True, + **kargs +): @forbid_nonstring_types(forbidden_types, name=name) def wrapper(self): result = _na_map(f, self._parent, **kargs) - return self._wrap_result(result) + return self._wrap_result(result, returns_string=returns_string) wrapper.__name__ = f.__name__ if name is None else name if docstring is not None: @@ -1874,22 +1895,28 @@ def wrapper(self): def _pat_wrapper( - f, flags=False, na=False, name=None, forbidden_types=["bytes"], **kwargs + f, + flags=False, + na=False, + name=None, + 
forbidden_types=["bytes"], + returns_string=True, + **kwargs ): @forbid_nonstring_types(forbidden_types, name=name) def wrapper1(self, pat): result = f(self._parent, pat) - return self._wrap_result(result) + return self._wrap_result(result, returns_string=returns_string) @forbid_nonstring_types(forbidden_types, name=name) def wrapper2(self, pat, flags=0, **kwargs): result = f(self._parent, pat, flags=flags, **kwargs) - return self._wrap_result(result) + return self._wrap_result(result, returns_string=returns_string) @forbid_nonstring_types(forbidden_types, name=name) def wrapper3(self, pat, na=np.nan): result = f(self._parent, pat, na=na) - return self._wrap_result(result) + return self._wrap_result(result, returns_string=returns_string) wrapper = wrapper3 if na else wrapper2 if flags else wrapper1 @@ -1926,6 +1953,7 @@ class StringMethods(NoNewAttributesMixin): def __init__(self, data): self._inferred_dtype = self._validate(data) self._is_categorical = is_categorical_dtype(data) + self._is_string = data.dtype.name == "string" # .values.categories works for both Series/Index self._parent = data.values.categories if self._is_categorical else data @@ -1956,6 +1984,8 @@ def _validate(data): ------- dtype : inferred dtype of data """ + from pandas import StringDtype + if isinstance(data, ABCMultiIndex): raise AttributeError( "Can only use .str accessor with Index, not MultiIndex" @@ -1967,6 +1997,10 @@ def _validate(data): values = getattr(data, "values", data) # Series / Index values = getattr(values, "categories", values) # categorical / normal + # explicitly allow StringDtype + if isinstance(values.dtype, StringDtype): + return "string" + try: inferred_dtype = lib.infer_dtype(values, skipna=True) except ValueError: @@ -1992,7 +2026,13 @@ def __iter__(self): g = self.get(i) def _wrap_result( - self, result, use_codes=True, name=None, expand=None, fill_value=np.nan + self, + result, + use_codes=True, + name=None, + expand=None, + fill_value=np.nan, + 
returns_string=True, ): from pandas import Index, Series, MultiIndex @@ -2012,6 +2052,15 @@ def _wrap_result( return result assert result.ndim < 3 + # We can be wrapping a string / object / categorical result, in which + # case we'll want to return the same dtype as the input. + # Or we can be wrapping a numeric output, in which case we don't want + # to return a StringArray. + if self._is_string and returns_string: + dtype = "string" + else: + dtype = None + if expand is None: # infer from ndim if expand is not specified expand = result.ndim != 1 @@ -2069,11 +2118,12 @@ def cons_row(x): index = self._orig.index if expand: cons = self._orig._constructor_expanddim - return cons(result, columns=name, index=index) + result = cons(result, columns=name, index=index, dtype=dtype) else: # Must be a Series cons = self._orig._constructor - return cons(result, name=name, index=index) + result = cons(result, name=name, index=index, dtype=dtype) + return result def _get_series_list(self, others): """ @@ -2338,9 +2388,12 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): # add dtype for case that result is all-NA result = Index(result, dtype=object, name=self._orig.name) else: # Series - result = Series( - result, dtype=object, index=data.index, name=self._orig.name - ) + if is_categorical_dtype(self._orig.dtype): + # We need to infer the new categories. 
+ dtype = None + else: + dtype = self._orig.dtype + result = Series(result, dtype=dtype, index=data.index, name=self._orig.name) return result _shared_docs[ @@ -2479,13 +2532,13 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): @forbid_nonstring_types(["bytes"]) def split(self, pat=None, n=-1, expand=False): result = str_split(self._parent, pat, n=n) - return self._wrap_result(result, expand=expand) + return self._wrap_result(result, expand=expand, returns_string=expand) @Appender(_shared_docs["str_split"] % {"side": "end", "method": "rsplit"}) @forbid_nonstring_types(["bytes"]) def rsplit(self, pat=None, n=-1, expand=False): result = str_rsplit(self._parent, pat, n=n) - return self._wrap_result(result, expand=expand) + return self._wrap_result(result, expand=expand, returns_string=expand) _shared_docs[ "str_partition" @@ -2586,7 +2639,7 @@ def rsplit(self, pat=None, n=-1, expand=False): def partition(self, sep=" ", expand=True): f = lambda x: x.partition(sep) result = _na_map(f, self._parent) - return self._wrap_result(result, expand=expand) + return self._wrap_result(result, expand=expand, returns_string=expand) @Appender( _shared_docs["str_partition"] @@ -2602,7 +2655,7 @@ def partition(self, sep=" ", expand=True): def rpartition(self, sep=" ", expand=True): f = lambda x: x.rpartition(sep) result = _na_map(f, self._parent) - return self._wrap_result(result, expand=expand) + return self._wrap_result(result, expand=expand, returns_string=expand) @copy(str_get) def get(self, i): @@ -2621,13 +2674,13 @@ def contains(self, pat, case=True, flags=0, na=np.nan, regex=True): result = str_contains( self._parent, pat, case=case, flags=flags, na=na, regex=regex ) - return self._wrap_result(result, fill_value=na) + return self._wrap_result(result, fill_value=na, returns_string=False) @copy(str_match) @forbid_nonstring_types(["bytes"]) def match(self, pat, case=True, flags=0, na=np.nan): result = str_match(self._parent, pat, case=case, flags=flags, na=na) - 
return self._wrap_result(result, fill_value=na) + return self._wrap_result(result, fill_value=na, returns_string=False) @copy(str_replace) @forbid_nonstring_types(["bytes"]) @@ -2762,13 +2815,14 @@ def slice_replace(self, start=None, stop=None, repl=None): def decode(self, encoding, errors="strict"): # need to allow bytes here result = str_decode(self._parent, encoding, errors) - return self._wrap_result(result) + # TODO: Not sure how to handle this. + return self._wrap_result(result, returns_string=False) @copy(str_encode) @forbid_nonstring_types(["bytes"]) def encode(self, encoding, errors="strict"): result = str_encode(self._parent, encoding, errors) - return self._wrap_result(result) + return self._wrap_result(result, returns_string=False) _shared_docs[ "str_strip" @@ -2869,7 +2923,11 @@ def get_dummies(self, sep="|"): data = self._orig.astype(str) if self._is_categorical else self._parent result, name = str_get_dummies(data, sep) return self._wrap_result( - result, use_codes=(not self._is_categorical), name=name, expand=True + result, + use_codes=(not self._is_categorical), + name=name, + expand=True, + returns_string=False, ) @copy(str_translate) @@ -2878,10 +2936,16 @@ def translate(self, table): result = str_translate(self._parent, table) return self._wrap_result(result) - count = _pat_wrapper(str_count, flags=True, name="count") - startswith = _pat_wrapper(str_startswith, na=True, name="startswith") - endswith = _pat_wrapper(str_endswith, na=True, name="endswith") - findall = _pat_wrapper(str_findall, flags=True, name="findall") + count = _pat_wrapper(str_count, flags=True, name="count", returns_string=False) + startswith = _pat_wrapper( + str_startswith, na=True, name="startswith", returns_string=False + ) + endswith = _pat_wrapper( + str_endswith, na=True, name="endswith", returns_string=False + ) + findall = _pat_wrapper( + str_findall, flags=True, name="findall", returns_string=False + ) @copy(str_extract) @forbid_nonstring_types(["bytes"]) @@ -2929,7 
+2993,7 @@ def extractall(self, pat, flags=0): @forbid_nonstring_types(["bytes"]) def find(self, sub, start=0, end=None): result = str_find(self._parent, sub, start=start, end=end, side="left") - return self._wrap_result(result) + return self._wrap_result(result, returns_string=False) @Appender( _shared_docs["find"] @@ -2942,7 +3006,7 @@ def find(self, sub, start=0, end=None): @forbid_nonstring_types(["bytes"]) def rfind(self, sub, start=0, end=None): result = str_find(self._parent, sub, start=start, end=end, side="right") - return self._wrap_result(result) + return self._wrap_result(result, returns_string=False) @forbid_nonstring_types(["bytes"]) def normalize(self, form): @@ -3004,7 +3068,7 @@ def normalize(self, form): @forbid_nonstring_types(["bytes"]) def index(self, sub, start=0, end=None): result = str_index(self._parent, sub, start=start, end=end, side="left") - return self._wrap_result(result) + return self._wrap_result(result, returns_string=False) @Appender( _shared_docs["index"] @@ -3018,7 +3082,7 @@ def index(self, sub, start=0, end=None): @forbid_nonstring_types(["bytes"]) def rindex(self, sub, start=0, end=None): result = str_index(self._parent, sub, start=start, end=end, side="right") - return self._wrap_result(result) + return self._wrap_result(result, returns_string=False) _shared_docs[ "len" @@ -3067,7 +3131,11 @@ def rindex(self, sub, start=0, end=None): dtype: float64 """ len = _noarg_wrapper( - len, docstring=_shared_docs["len"], forbidden_types=None, dtype=int + len, + docstring=_shared_docs["len"], + forbidden_types=None, + dtype=int, + returns_string=False, ) _shared_docs[ @@ -3339,46 +3407,55 @@ def rindex(self, sub, start=0, end=None): lambda x: x.isalnum(), name="isalnum", docstring=_shared_docs["ismethods"] % _doc_args["isalnum"], + returns_string=False, ) isalpha = _noarg_wrapper( lambda x: x.isalpha(), name="isalpha", docstring=_shared_docs["ismethods"] % _doc_args["isalpha"], + returns_string=False, ) isdigit = _noarg_wrapper( lambda 
x: x.isdigit(), name="isdigit", docstring=_shared_docs["ismethods"] % _doc_args["isdigit"], + returns_string=False, ) isspace = _noarg_wrapper( lambda x: x.isspace(), name="isspace", docstring=_shared_docs["ismethods"] % _doc_args["isspace"], + returns_string=False, ) islower = _noarg_wrapper( lambda x: x.islower(), name="islower", docstring=_shared_docs["ismethods"] % _doc_args["islower"], + returns_string=False, ) isupper = _noarg_wrapper( lambda x: x.isupper(), name="isupper", docstring=_shared_docs["ismethods"] % _doc_args["isupper"], + returns_string=False, ) istitle = _noarg_wrapper( lambda x: x.istitle(), name="istitle", docstring=_shared_docs["ismethods"] % _doc_args["istitle"], + returns_string=False, ) isnumeric = _noarg_wrapper( lambda x: x.isnumeric(), name="isnumeric", docstring=_shared_docs["ismethods"] % _doc_args["isnumeric"], + returns_string=False, ) isdecimal = _noarg_wrapper( lambda x: x.isdecimal(), name="isdecimal", docstring=_shared_docs["ismethods"] % _doc_args["isdecimal"], + returns_string=False, ) @classmethod diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 2f24bbd6f0c85..6c50159663574 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -68,6 +68,7 @@ class TestPDApi(Base): "Series", "SparseArray", "SparseDtype", + "StringDtype", "Timedelta", "TimedeltaIndex", "Timestamp", diff --git a/pandas/tests/arrays/string_/__init__.py b/pandas/tests/arrays/string_/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py new file mode 100644 index 0000000000000..40221c34116ae --- /dev/null +++ b/pandas/tests/arrays/string_/test_string.py @@ -0,0 +1,160 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas.util.testing as tm + + +def test_none_to_nan(): + a = pd.arrays.StringArray._from_sequence(["a", None, "b"]) + assert a[1] is not None + 
assert np.isnan(a[1]) + + +def test_setitem_validates(): + a = pd.arrays.StringArray._from_sequence(["a", "b"]) + with pytest.raises(ValueError, match="10"): + a[0] = 10 + + with pytest.raises(ValueError, match="strings"): + a[:] = np.array([1, 2]) + + +@pytest.mark.parametrize( + "input, method", + [ + (["a", "b", "c"], operator.methodcaller("capitalize")), + (["a", "b", "c"], operator.methodcaller("capitalize")), + (["a b", "a bc. de"], operator.methodcaller("capitalize")), + ], +) +def test_string_methods(input, method): + a = pd.Series(input, dtype="string") + b = pd.Series(input, dtype="object") + result = method(a.str) + expected = method(b.str) + + assert result.dtype.name == "string" + tm.assert_series_equal(result.astype(object), expected) + + +def test_astype_roundtrip(): + s = pd.Series(pd.date_range("2000", periods=12)) + s[0] = None + + result = s.astype("string").astype("datetime64[ns]") + tm.assert_series_equal(result, s) + + +def test_add(): + a = pd.Series(["a", "b", "c", None, None], dtype="string") + b = pd.Series(["x", "y", None, "z", None], dtype="string") + + result = a + b + expected = pd.Series(["ax", "by", None, None, None], dtype="string") + tm.assert_series_equal(result, expected) + + result = a.add(b) + tm.assert_series_equal(result, expected) + + result = a.radd(b) + expected = pd.Series(["xa", "yb", None, None, None], dtype="string") + tm.assert_series_equal(result, expected) + + result = a.add(b, fill_value="-") + expected = pd.Series(["ax", "by", "c-", "-z", None], dtype="string") + tm.assert_series_equal(result, expected) + + +def test_add_2d(): + a = pd.array(["a", "b", "c"], dtype="string") + b = np.array([["a", "b", "c"]], dtype=object) + with pytest.raises(ValueError, match="3 != 1"): + a + b + + s = pd.Series(a) + with pytest.raises(ValueError, match="3 != 1"): + s + b + + +def test_add_sequence(): + a = pd.array(["a", "b", None, None], dtype="string") + other = ["x", None, "y", None] + + result = a + other + expected = 
pd.array(["ax", None, None, None], dtype="string") + tm.assert_extension_array_equal(result, expected) + + result = other + a + expected = pd.array(["xa", None, None, None], dtype="string") + tm.assert_extension_array_equal(result, expected) + + +def test_mul(): + a = pd.array(["a", "b", None], dtype="string") + result = a * 2 + expected = pd.array(["aa", "bb", None], dtype="string") + tm.assert_extension_array_equal(result, expected) + + result = 2 * a + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.xfail(reason="GH-28527") +def test_add_strings(): + array = pd.array(["a", "b", "c", "d"], dtype="string") + df = pd.DataFrame([["t", "u", "v", "w"]]) + assert array.__add__(df) is NotImplemented + + result = array + df + expected = pd.DataFrame([["at", "bu", "cv", "dw"]]).astype("string") + tm.assert_frame_equal(result, expected) + + result = df + array + expected = pd.DataFrame([["ta", "ub", "vc", "wd"]]).astype("string") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.xfail(reason="GH-28527") +def test_add_frame(): + array = pd.array(["a", "b", np.nan, np.nan], dtype="string") + df = pd.DataFrame([["x", np.nan, "y", np.nan]]) + + assert array.__add__(df) is NotImplemented + + result = array + df + expected = pd.DataFrame([["ax", np.nan, np.nan, np.nan]]).astype("string") + tm.assert_frame_equal(result, expected) + + result = df + array + expected = pd.DataFrame([["xa", np.nan, np.nan, np.nan]]).astype("string") + tm.assert_frame_equal(result, expected) + + +def test_constructor_raises(): + with pytest.raises(ValueError, match="sequence of strings"): + pd.arrays.StringArray(np.array(["a", "b"], dtype="S1")) + + with pytest.raises(ValueError, match="sequence of strings"): + pd.arrays.StringArray(np.array([])) + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.xfail(reason="Not implemented StringArray.sum") +def test_reduce(skipna): + arr = pd.Series(["a", "b", "c"], dtype="string") + result = arr.sum(skipna=skipna) + 
assert result == "abc" + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.xfail(reason="Not implemented StringArray.sum") +def test_reduce_missing(skipna): + arr = pd.Series([None, "a", None, "b", "c", None], dtype="string") + result = arr.sum(skipna=skipna) + if skipna: + assert result == "abc" + else: + assert pd.isna(result) diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 266f7ac50c663..466b724f98770 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -291,6 +291,8 @@ def test_is_string_dtype(): assert com.is_string_dtype(str) assert com.is_string_dtype(object) assert com.is_string_dtype(np.array(["a", "b"])) + assert com.is_string_dtype(pd.StringDtype()) + assert com.is_string_dtype(pd.array(["a", "b"], dtype="string")) def test_is_period_arraylike(): diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py new file mode 100644 index 0000000000000..5b872d5b72227 --- /dev/null +++ b/pandas/tests/extension/test_string.py @@ -0,0 +1,112 @@ +import string + +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.string_ import StringArray, StringDtype +from pandas.tests.extension import base + + +@pytest.fixture +def dtype(): + return StringDtype() + + +@pytest.fixture +def data(): + strings = np.random.choice(list(string.ascii_letters), size=100) + while strings[0] == strings[1]: + strings = np.random.choice(list(string.ascii_letters), size=100) + + return StringArray._from_sequence(strings) + + +@pytest.fixture +def data_missing(): + """Length 2 array with [NA, Valid]""" + return StringArray._from_sequence([np.nan, "A"]) + + +@pytest.fixture +def data_for_sorting(): + return StringArray._from_sequence(["B", "C", "A"]) + + +@pytest.fixture +def data_missing_for_sorting(): + return StringArray._from_sequence(["B", np.nan, "A"]) + + +@pytest.fixture +def na_value(): + return np.nan + + +@pytest.fixture +def 
data_for_grouping(): + return StringArray._from_sequence(["B", "B", np.nan, np.nan, "A", "A", "B", "C"]) + + +class TestDtype(base.BaseDtypeTests): + pass + + +class TestInterface(base.BaseInterfaceTests): + pass + + +class TestConstructors(base.BaseConstructorsTests): + pass + + +class TestReshaping(base.BaseReshapingTests): + pass + + +class TestGetitem(base.BaseGetitemTests): + pass + + +class TestSetitem(base.BaseSetitemTests): + pass + + +class TestMissing(base.BaseMissingTests): + pass + + +class TestNoReduce(base.BaseNoReduceTests): + pass + + +class TestMethods(base.BaseMethodsTests): + pass + + +class TestCasting(base.BaseCastingTests): + pass + + +class TestComparisonOps(base.BaseComparisonOpsTests): + def _compare_other(self, s, data, op_name, other): + result = getattr(s, op_name)(other) + expected = getattr(s.astype(object), op_name)(other) + self.assert_series_equal(result, expected) + + def test_compare_scalar(self, data, all_compare_operators): + op_name = all_compare_operators + s = pd.Series(data) + self._compare_other(s, data, op_name, "abc") + + +class TestParsing(base.BaseParsingTests): + pass + + +class TestPrinting(base.BasePrintingTests): + pass + + +class TestGroupBy(base.BaseGroupbyTests): + pass diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index bc8dc7272a83a..b50f1a0fd2f2a 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -6,6 +6,8 @@ from numpy.random import randint import pytest +from pandas._libs import lib + from pandas import DataFrame, Index, MultiIndex, Series, concat, isna, notna import pandas.core.strings as strings import pandas.util.testing as tm @@ -3269,3 +3271,25 @@ def test_casefold(self): result = s.str.casefold() tm.assert_series_equal(result, expected) + + +def test_string_array(any_string_method): + data = ["a", "bb", np.nan, "ccc"] + a = Series(data, dtype=object) + b = Series(data, dtype="string") + method_name, args, kwargs = any_string_method + + expected = 
getattr(a.str, method_name)(*args, **kwargs) + result = getattr(b.str, method_name)(*args, **kwargs) + + if isinstance(expected, Series): + if expected.dtype == "object" and lib.is_string_array( + expected.values, skipna=True + ): + assert result.dtype == "string" + result = result.astype(object) + elif isinstance(expected, DataFrame): + columns = expected.select_dtypes(include="object").columns + assert all(result[columns].dtypes == "string") + result[columns] = result[columns].astype(object) + tm.assert_equal(result, expected) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 1c0a8dbc19ccd..5cd39e79199e6 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1431,6 +1431,9 @@ def assert_equal(left, right, **kwargs): assert_extension_array_equal(left, right, **kwargs) elif isinstance(left, np.ndarray): assert_numpy_array_equal(left, right, **kwargs) + elif isinstance(left, str): + assert kwargs == {} + return left == right else: raise NotImplementedError(type(left))
This adds a new extension type 'string' for storing string data. The data model is essentially unchanged from master. String are still stored in an object-dtype ndarray. Scalar elements are still Python strs, and `np.nan` is still used as the string dtype. Things are pretty well contained. The major changes outside the new array are 1. docs 2. `core/strings.py` to handle things correctly (mostly, returning `string` dtype when there's a `string` input. No rush on reviewing this. Just parking it here for now.
https://api.github.com/repos/pandas-dev/pandas/pulls/27949
2019-08-16T15:22:23Z
2019-10-05T23:17:41Z
2019-10-05T23:17:41Z
2019-10-07T11:33:37Z
Backport PR #27946 on branch 0.25.x (BUG: Merge with readonly arrays)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 245aa42b0de14..d1324bc759ea1 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -127,9 +127,9 @@ Reshaping ^^^^^^^^^ - A ``KeyError`` is now raised if ``.unstack()`` is called on a :class:`Series` or :class:`DataFrame` with a flat :class:`Index` passing a name which is not the correct one (:issue:`18303`) -- Bug in :meth:`DataFrame.crosstab` when ``margins`` set to ``True`` and ``normalize`` is not ``False``, an error is raised. (:issue:`27500`) +- Bug in :meth:`DataFrame.crosstab` when ``margins`` set to ``True`` and ``normalize`` is not ``False``, an error is raised. (:issue:`27500`) - :meth:`DataFrame.join` now suppresses the ``FutureWarning`` when the sort parameter is specified (:issue:`21952`) -- +- Bug in :meth:`DataFrame.join` raising with readonly arrays (:issue:`27943`) Sparse ^^^^^^ diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 3e620f5934d5e..b8df78e600a46 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -108,7 +108,7 @@ cdef class Int64Factorizer: def get_count(self): return self.count - def factorize(self, int64_t[:] values, sort=False, + def factorize(self, const int64_t[:] values, sort=False, na_sentinel=-1, na_value=None): """ Factorize values with nans replaced by na_sentinel diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index b6c6f967333a8..a04f093ee7818 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1340,6 +1340,18 @@ def test_merge_take_missing_values_from_index_of_other_dtype(self): expected = expected.reindex(columns=["a", "key", "b"]) tm.assert_frame_equal(result, expected) + def test_merge_readonly(self): + # https://github.com/pandas-dev/pandas/issues/27943 + data1 = pd.DataFrame( + np.arange(20).reshape((4, 5)) + 1, columns=["a", "b", "c", "d", "e"] + ) + 
data2 = pd.DataFrame( + np.arange(20).reshape((5, 4)) + 1, columns=["a", "b", "x", "y"] + ) + + data1._data.blocks[0].values.flags.writeable = False + data1.merge(data2) # no error + def _check_merge(x, y): for how in ["inner", "left", "outer"]:
Backport PR #27946: BUG: Merge with readonly arrays
https://api.github.com/repos/pandas-dev/pandas/pulls/27948
2019-08-16T12:08:27Z
2019-08-16T18:51:23Z
2019-08-16T18:51:23Z
2019-08-16T18:51:23Z