title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
DOC: Clarify DataFrame column argument in API documentation
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 46501c97cf38a..60eb0339b1f1d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -472,8 +472,9 @@ class DataFrame(NDFrame, OpsMixin): Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like - Column labels to use for resulting frame. Will default to - RangeIndex (0, 1, 2, ..., n) if no column labels are provided. + Column labels to use for resulting frame when data does not have them, + defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, + will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool, default False @@ -523,6 +524,18 @@ class DataFrame(NDFrame, OpsMixin): 1 4 5 6 2 7 8 9 + Constructing DataFrame from a numpy ndarray that has labeled columns: + + >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], + ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) + >>> df3 = pd.DataFrame(data, columns=['c', 'a']) + ... + >>> df3 + c a + 0 3 1 + 1 6 4 + 2 9 7 + Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass
- [x] closes #39904 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/40658
2021-03-27T18:01:02Z
2021-04-01T07:15:12Z
2021-04-01T07:15:11Z
2021-04-01T08:36:18Z
CLN: move pytest config to pyproject.toml
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d6744f578560c..ca0c75f9de94f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -65,7 +65,7 @@ jobs: if: always() - name: Testing docstring validation script - run: pytest --capture=no --strict-markers scripts + run: pytest scripts if: always() - name: Running benchmarks diff --git a/.github/workflows/database.yml b/.github/workflows/database.yml index ba5a0a1fd0909..a5aef7825c770 100644 --- a/.github/workflows/database.yml +++ b/.github/workflows/database.yml @@ -78,7 +78,7 @@ jobs: uses: ./.github/actions/build_pandas - name: Test - run: pytest -m "${{ env.PATTERN }}" -n 2 --dist=loadfile -s --strict-markers --durations=30 --junitxml=test-data.xml -s --cov=pandas --cov-report=xml pandas/tests/io + run: pytest -m "${{ env.PATTERN }}" -n 2 --dist=loadfile --cov=pandas --cov-report=xml pandas/tests/io if: always() - name: Build Version diff --git a/ci/deps/actions-37-minimum_versions.yaml b/ci/deps/actions-37-minimum_versions.yaml index 8052156858a32..3237cf9770220 100644 --- a/ci/deps/actions-37-minimum_versions.yaml +++ b/ci/deps/actions-37-minimum_versions.yaml @@ -6,7 +6,7 @@ dependencies: # tools - cython=0.29.21 - - pytest=5.0.1 + - pytest>=6.0 - pytest-cov - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/run_tests.sh b/ci/run_tests.sh index f5e3420b8c9b3..261d6364cb5e1 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -19,7 +19,7 @@ if [[ $(uname) == "Linux" && -z $DISPLAY ]]; then XVFB="xvfb-run " fi -PYTEST_CMD="${XVFB}pytest -m \"$PATTERN\" -n $PYTEST_WORKERS --dist=loadfile -s --strict-markers --durations=30 --junitxml=test-data.xml $TEST_ARGS $COVERAGE pandas" +PYTEST_CMD="${XVFB}pytest -m \"$PATTERN\" -n $PYTEST_WORKERS --dist=loadfile $TEST_ARGS $COVERAGE pandas" if [[ $(uname) != "Linux" && $(uname) != "Darwin" ]]; then # GH#37455 windows py38 build appears to be running out of memory @@ -30,7 +30,7 @@ fi echo $PYTEST_CMD sh -c "$PYTEST_CMD" 
-PYTEST_AM_CMD="PANDAS_DATA_MANAGER=array pytest -m \"$PATTERN and arraymanager\" -n $PYTEST_WORKERS --dist=loadfile -s --strict-markers --durations=30 --junitxml=test-data.xml $TEST_ARGS $COVERAGE pandas" +PYTEST_AM_CMD="PANDAS_DATA_MANAGER=array pytest -m \"$PATTERN and arraymanager\" -n $PYTEST_WORKERS --dist=loadfile $TEST_ARGS $COVERAGE pandas" echo $PYTEST_AM_CMD sh -c "$PYTEST_AM_CMD" diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 89b21d1984ad3..16beb00d201b7 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -195,7 +195,7 @@ pandas is equipped with an exhaustive set of unit tests, covering about 97% of the code base as of this writing. To run it on your machine to verify that everything is working (and that you have all of the dependencies, soft and hard, installed), make sure you have `pytest -<https://docs.pytest.org/en/latest/>`__ >= 5.0.1 and `Hypothesis +<https://docs.pytest.org/en/latest/>`__ >= 6.0 and `Hypothesis <https://hypothesis.readthedocs.io/>`__ >= 3.58, then run: :: diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 842b50ce53b21..31517e363140d 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -515,7 +515,7 @@ If installed, we now require: +-----------------+-----------------+----------+---------+ | numexpr | 2.6.8 | | | +-----------------+-----------------+----------+---------+ -| pytest (dev) | 5.0.1 | | | +| pytest (dev) | 6.0 | | X | +-----------------+-----------------+----------+---------+ | mypy (dev) | 0.800 | | X | +-----------------+-----------------+----------+---------+ diff --git a/pandas/conftest.py b/pandas/conftest.py index 35affa62ccf68..7b29c41ef70f5 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -66,28 +66,10 @@ MultiIndex, ) - # ---------------------------------------------------------------- # Configuration / Settings # 
---------------------------------------------------------------- # pytest -def pytest_configure(config): - # Register marks to avoid warnings in pandas.test() - # sync with setup.cfg - config.addinivalue_line("markers", "single: mark a test as single cpu only") - config.addinivalue_line("markers", "slow: mark a test as slow") - config.addinivalue_line("markers", "network: mark a test as network") - config.addinivalue_line( - "markers", "db: tests requiring a database (mysql or postgres)" - ) - config.addinivalue_line("markers", "high_memory: mark a test as a high-memory only") - config.addinivalue_line("markers", "clipboard: mark a pd.read_clipboard test") - config.addinivalue_line( - "markers", "arm_slow: mark a test as slow for arm64 architecture" - ) - config.addinivalue_line( - "markers", "arraymanager: mark a test to run with ArrayManager enabled" - ) def pytest_addoption(parser): diff --git a/pyproject.toml b/pyproject.toml index 3ffda4e2149c0..01d28777eb47e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,3 +32,30 @@ exclude = ''' | setup.py ) ''' + +[tool.pytest.ini_options] +# sync minversion with pyproject.toml & install.rst +minversion = "6.0" +addopts = "--strict-data-files --strict-markers --capture=no --durations=30 --junitxml=test-data.xml" +xfail_strict = true +testpaths = "pandas" +doctest_optionflags = [ + "NORMALIZE_WHITESPACE", + "IGNORE_EXCEPTION_DETAIL", + "ELLIPSIS", +] +filterwarnings = [ + "error:Sparse:FutureWarning", + "error:The SparseArray:FutureWarning", +] +junit_family = "xunit2" +markers = [ + "single: mark a test as single cpu only", + "slow: mark a test as slow", + "network: mark a test as network", + "db: tests requiring a database (mysql or postgres)", + "high_memory: mark a test as a high-memory only", + "clipboard: mark a pd.read_clipboard test", + "arm_slow: mark a test as slow for arm64 architecture", + "arraymanager: mark a test to run with ArrayManager enabled", +] diff --git a/setup.cfg b/setup.cfg index 
610b30e4422a9..f39e377e50c97 100644 --- a/setup.cfg +++ b/setup.cfg @@ -45,7 +45,7 @@ pandas_plotting_backends = [options.extras_require] test = hypothesis>=3.58 - pytest>=5.0.1 + pytest>=6.0 pytest-xdist [options.package_data] @@ -127,18 +127,6 @@ exclude = # https://github.com/pandas-dev/pandas/pull/38837#issuecomment-752884156 doc/source/getting_started/comparison/includes/*.rst -[tool:pytest] -# sync minversion with setup.cfg & install.rst -minversion = 5.0.1 -testpaths = pandas -doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL ELLIPSIS -addopts = --strict-data-files -xfail_strict = True -filterwarnings = - error:Sparse:FutureWarning - error:The SparseArray:FutureWarning -junit_family = xunit2 - [codespell] ignore-words-list = ba,blocs,coo,hist,nd,sav,ser ignore-regex = https://(\w+\.)+ diff --git a/test_fast.bat b/test_fast.bat index 34c61fea08ab4..642e0549f3228 100644 --- a/test_fast.bat +++ b/test_fast.bat @@ -1,3 +1,3 @@ :: test on windows set PYTHONHASHSEED=314159265 -pytest --skip-slow --skip-network --skip-db -m "not single" -n 4 -r sXX --strict-markers pandas +pytest --skip-slow --skip-network --skip-db -m "not single" -n 4 -r sXX pandas diff --git a/test_fast.sh b/test_fast.sh index 6444b81b3c6da..9d446964cf501 100755 --- a/test_fast.sh +++ b/test_fast.sh @@ -5,4 +5,4 @@ # https://github.com/pytest-dev/pytest/issues/1075 export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 4294967295))') -pytest pandas --skip-slow --skip-network --skip-db -m "not single" -n 4 -r sxX --strict-markers "$@" +pytest pandas --skip-slow --skip-network --skip-db -m "not single" -n 4 -r sxX "$@"
According to https://docs.pytest.org/en/stable/customize.html, using `setup.cfg`as pytest config file is not recommended. So moved everything to `pyproject.toml`
https://api.github.com/repos/pandas-dev/pandas/pulls/40656
2021-03-27T14:23:24Z
2021-04-25T11:57:19Z
2021-04-25T11:57:19Z
2022-11-18T02:21:44Z
[ENH] introducing IntpHashMap and making unique_label_indices use intp
diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi index bf7df5776896b..9c1de67a7ba2a 100644 --- a/pandas/_libs/hashtable.pyi +++ b/pandas/_libs/hashtable.pyi @@ -192,6 +192,7 @@ class UInt16HashTable(HashTable): ... class UInt8HashTable(HashTable): ... class StringHashTable(HashTable): ... class PyObjectHashTable(HashTable): ... +class IntpHashTable(HashTable): ... def duplicated_int64( values: np.ndarray, # const int64_t[:] values diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 3eb7bcc673cd4..6e97c13c644cf 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -65,6 +65,18 @@ cdef Py_ssize_t _INIT_VEC_CAP = 128 include "hashtable_class_helper.pxi" include "hashtable_func_helper.pxi" + +# map derived hash-map types onto basic hash-map types: +if np.dtype(np.intp) == np.dtype(np.int64): + IntpHashTable = Int64HashTable + unique_label_indices = _unique_label_indices_int64 +elif np.dtype(np.intp) == np.dtype(np.int32): + IntpHashTable = Int32HashTable + unique_label_indices = _unique_label_indices_int32 +else: + raise ValueError(np.dtype(np.intp)) + + cdef class Factorizer: cdef readonly: Py_ssize_t count @@ -168,38 +180,3 @@ cdef class Int64Factorizer(Factorizer): self.count = len(self.uniques) return labels - - -@cython.wraparound(False) -@cython.boundscheck(False) -def unique_label_indices(const int64_t[:] labels) -> ndarray: - """ - Indices of the first occurrences of the unique labels - *excluding* -1. 
equivalent to: - np.unique(labels, return_index=True)[1] - """ - cdef: - int ret = 0 - Py_ssize_t i, n = len(labels) - kh_int64_t *table = kh_init_int64() - Int64Vector idx = Int64Vector() - ndarray[int64_t, ndim=1] arr - Int64VectorData *ud = idx.data - - kh_resize_int64(table, min(kh_needed_n_buckets(n), SIZE_HINT_LIMIT)) - - with nogil: - for i in range(n): - kh_put_int64(table, labels[i], &ret) - if ret != 0: - if needs_resize(ud): - with gil: - idx.resize() - append_data_int64(ud, i) - - kh_destroy_int64(table) - - arr = idx.to_array() - arr = arr[np.asarray(labels)[arr].argsort()] - - return arr[1:] if arr.size != 0 and labels[arr[0]] == -1 else arr diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in index ceb473a0b06af..fb8ce79a924a4 100644 --- a/pandas/_libs/hashtable_func_helper.pxi.in +++ b/pandas/_libs/hashtable_func_helper.pxi.in @@ -470,3 +470,51 @@ cpdef mode(ndarray[htfunc_t] values, bint dropna): else: raise TypeError(values.dtype) + + +{{py: + +# name, dtype, ttype, c_type +dtypes = [('Int64', 'int64', 'int64', 'int64_t'), + ('Int32', 'int32', 'int32', 'int32_t'), ] + +}} + +{{for name, dtype, ttype, c_type in dtypes}} + + +@cython.wraparound(False) +@cython.boundscheck(False) +def _unique_label_indices_{{dtype}}(const {{c_type}}[:] labels) -> ndarray: + """ + Indices of the first occurrences of the unique labels + *excluding* -1. 
equivalent to: + np.unique(labels, return_index=True)[1] + """ + cdef: + int ret = 0 + Py_ssize_t i, n = len(labels) + kh_{{ttype}}_t *table = kh_init_{{ttype}}() + {{name}}Vector idx = {{name}}Vector() + ndarray[{{c_type}}, ndim=1] arr + {{name}}VectorData *ud = idx.data + + kh_resize_{{ttype}}(table, min(kh_needed_n_buckets(n), SIZE_HINT_LIMIT)) + + with nogil: + for i in range(n): + kh_put_{{ttype}}(table, labels[i], &ret) + if ret != 0: + if needs_resize(ud): + with gil: + idx.resize() + append_data_{{ttype}}(ud, i) + + kh_destroy_{{ttype}}(table) + + arr = idx.to_array() + arr = arr[np.asarray(labels)[arr].argsort()] + + return arr[1:] if arr.size != 0 and labels[arr[0]] == -1 else arr + +{{endfor}} diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index ccb51a0ea2132..a8348b0c5773f 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -261,8 +261,7 @@ def decons_obs_group_ids( out = decons_group_index(obs_ids, shape) return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)] - # TODO: unique_label_indices only used here, should take ndarray[np.intp] - indexer = unique_label_indices(ensure_int64(comp_ids)) + indexer = unique_label_indices(comp_ids) return [lab[indexer].astype(np.intp, subok=False, copy=True) for lab in labels] diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py index 5ff20051da8c0..8b7304a84c27b 100644 --- a/pandas/tests/libs/test_hashtable.py +++ b/pandas/tests/libs/test_hashtable.py @@ -44,6 +44,7 @@ def get_allocated_khash_memory(): (ht.UInt16HashTable, np.uint16), (ht.Int8HashTable, np.int8), (ht.UInt8HashTable, np.uint8), + (ht.IntpHashTable, np.intp), ], ) class TestHashTable: @@ -389,6 +390,7 @@ def get_ht_function(fun_name, type_suffix): (np.uint16, "uint16"), (np.int8, "int8"), (np.uint8, "uint8"), + (np.intp, "intp"), ], ) class TestHelpFunctions: @@ -471,6 +473,14 @@ def test_modes_with_nans(): assert np.isnan(modes[0]) +def 
test_unique_label_indices_intp(writable): + keys = np.array([1, 2, 2, 2, 1, 3], dtype=np.intp) + keys.flags.writeable = writable + result = ht.unique_label_indices(keys) + expected = np.array([0, 1, 5], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( "dtype, type_suffix", [ diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 5488c076554fd..c55f673e4f3e4 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1741,7 +1741,7 @@ def test_quantile(): def test_unique_label_indices(): - a = np.random.randint(1, 1 << 10, 1 << 15).astype("int64") + a = np.random.randint(1, 1 << 10, 1 << 15).astype(np.intp) left = ht.unique_label_indices(a) right = np.unique(a, return_index=True)[1]
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them Introduces IntpHashMap. Instead of making it from the scratch, we map the functionality either to Int32HashMap or Int64HashMap depending on the plattform. This has the advantage, that the resulting so/pyd-files don't get bigger.
https://api.github.com/repos/pandas-dev/pandas/pulls/40653
2021-03-27T06:47:28Z
2021-10-07T00:36:48Z
2021-10-07T00:36:47Z
2021-10-07T00:37:01Z
PERF/BUG: use masked algo in groupby cummin and cummax
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index b4b20553ec460..27761ccd0d917 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -505,6 +505,34 @@ def time_frame_agg(self, dtype, method): self.df.groupby("key").agg(method) +class CumminMax: + param_names = ["dtype", "method"] + params = [ + ["float64", "int64", "Float64", "Int64"], + ["cummin", "cummax"], + ] + + def setup(self, dtype, method): + N = 500_000 + vals = np.random.randint(-10, 10, (N, 5)) + null_vals = vals.astype(float, copy=True) + null_vals[::2, :] = np.nan + null_vals[::3, :] = np.nan + df = DataFrame(vals, columns=list("abcde"), dtype=dtype) + null_df = DataFrame(null_vals, columns=list("abcde"), dtype=dtype) + keys = np.random.randint(0, 100, size=N) + df["key"] = keys + null_df["key"] = keys + self.df = df + self.null_df = null_df + + def time_frame_transform(self, dtype, method): + self.df.groupby("key").transform(method) + + def time_frame_transform_many_nulls(self, dtype, method): + self.null_df.groupby("key").transform(method) + + class RankWithTies: # GH 21237 param_names = ["dtype", "tie_method"] diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index a56260d6658f3..1024154d3a109 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -627,6 +627,8 @@ Performance improvements - Performance improvement in :meth:`core.window.ewm.ExponentialMovingWindow.mean` with ``times`` (:issue:`39784`) - Performance improvement in :meth:`.GroupBy.apply` when requiring the python fallback implementation (:issue:`40176`) - Performance improvement for concatenation of data with type :class:`CategoricalDtype` (:issue:`40193`) +- Performance improvement in :meth:`.GroupBy.cummin` and :meth:`.GroupBy.cummax` with nullable data types (:issue:`37493`) +- .. 
--------------------------------------------------------------------------- @@ -835,6 +837,7 @@ Groupby/resample/rolling - Bug in :meth:`.GroupBy.any` and :meth:`.GroupBy.all` raising ``ValueError`` when using with nullable type columns holding ``NA`` even with ``skipna=True`` (:issue:`40585`) - Bug in :meth:`GroupBy.cummin` and :meth:`GroupBy.cummax` incorrectly rounding integer values near the ``int64`` implementations bounds (:issue:`40767`) - Bug in :meth:`.GroupBy.rank` with nullable dtypes incorrectly raising ``TypeError`` (:issue:`41010`) +- Bug in :meth:`.GroupBy.cummin` and :meth:`.GroupBy.cummax` computing wrong result with nullable data types too large to roundtrip when casting to float (:issue:`37493`) Reshaping ^^^^^^^^^ diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 8fb307150a48f..3fa92ce2229c3 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -1277,6 +1277,7 @@ def group_min(groupby_t[:, ::1] out, @cython.wraparound(False) cdef group_cummin_max(groupby_t[:, ::1] out, ndarray[groupby_t, ndim=2] values, + uint8_t[:, ::1] mask, const intp_t[:] labels, int ngroups, bint is_datetimelike, @@ -1290,6 +1291,9 @@ cdef group_cummin_max(groupby_t[:, ::1] out, Array to store cummin/max in. values : np.ndarray[groupby_t, ndim=2] Values to take cummin/max of. + mask : np.ndarray[bool] or None + If not None, indices represent missing values, + otherwise the mask will not be used labels : np.ndarray[np.intp] Labels to group by. 
ngroups : int @@ -1307,11 +1311,14 @@ cdef group_cummin_max(groupby_t[:, ::1] out, cdef: Py_ssize_t i, j, N, K, size groupby_t val, mval - ndarray[groupby_t, ndim=2] accum + groupby_t[:, ::1] accum intp_t lab + bint val_is_nan, use_mask + + use_mask = mask is not None N, K = (<object>values).shape - accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype) + accum = np.empty((ngroups, K), dtype=values.dtype) if groupby_t is int64_t: accum[:] = -_int64_max if compute_max else _int64_max elif groupby_t is uint64_t: @@ -1326,11 +1333,29 @@ cdef group_cummin_max(groupby_t[:, ::1] out, if lab < 0: continue for j in range(K): - val = values[i, j] + val_is_nan = False + + if use_mask: + if mask[i, j]: + + # `out` does not need to be set since it + # will be masked anyway + val_is_nan = True + else: + + # If using the mask, we can avoid grabbing the + # value unless necessary + val = values[i, j] - if _treat_as_na(val, is_datetimelike): - out[i, j] = val + # Otherwise, `out` must be set accordingly if the + # value is missing else: + val = values[i, j] + if _treat_as_na(val, is_datetimelike): + val_is_nan = True + out[i, j] = val + + if not val_is_nan: mval = accum[lab, j] if compute_max: if val > mval: @@ -1347,9 +1372,18 @@ def group_cummin(groupby_t[:, ::1] out, ndarray[groupby_t, ndim=2] values, const intp_t[:] labels, int ngroups, - bint is_datetimelike) -> None: + bint is_datetimelike, + uint8_t[:, ::1] mask=None) -> None: """See group_cummin_max.__doc__""" - group_cummin_max(out, values, labels, ngroups, is_datetimelike, compute_max=False) + group_cummin_max( + out, + values, + mask, + labels, + ngroups, + is_datetimelike, + compute_max=False + ) @cython.boundscheck(False) @@ -1358,6 +1392,15 @@ def group_cummax(groupby_t[:, ::1] out, ndarray[groupby_t, ndim=2] values, const intp_t[:] labels, int ngroups, - bint is_datetimelike) -> None: + bint is_datetimelike, + uint8_t[:, ::1] mask=None) -> None: """See group_cummin_max.__doc__""" - group_cummin_max(out, 
values, labels, ngroups, is_datetimelike, compute_max=True) + group_cummin_max( + out, + values, + mask, + labels, + ngroups, + is_datetimelike, + compute_max=True + ) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 6eddf8e9e8773..0a9c46f6ed069 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -65,6 +65,10 @@ ) from pandas.core.arrays import ExtensionArray +from pandas.core.arrays.masked import ( + BaseMaskedArray, + BaseMaskedDtype, +) import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame @@ -124,6 +128,8 @@ def __init__(self, kind: str, how: str): }, } + _MASKED_CYTHON_FUNCTIONS = {"cummin", "cummax"} + _cython_arity = {"ohlc": 4} # OHLC # Note: we make this a classmethod and pass kind+how so that caching @@ -256,6 +262,9 @@ def get_out_dtype(self, dtype: np.dtype) -> np.dtype: out_dtype = "object" return np.dtype(out_dtype) + def uses_mask(self) -> bool: + return self.how in self._MASKED_CYTHON_FUNCTIONS + class BaseGrouper: """ @@ -619,9 +628,45 @@ def _ea_wrap_cython_operation( f"function is not implemented for this dtype: {values.dtype}" ) + @final + def _masked_ea_wrap_cython_operation( + self, + kind: str, + values: BaseMaskedArray, + how: str, + axis: int, + min_count: int = -1, + **kwargs, + ) -> BaseMaskedArray: + """ + Equivalent of `_ea_wrap_cython_operation`, but optimized for masked EA's + and cython algorithms which accept a mask. 
+ """ + orig_values = values + + # Copy to ensure input and result masks don't end up shared + mask = values._mask.copy() + arr = values._data + + res_values = self._cython_operation( + kind, arr, how, axis, min_count, mask=mask, **kwargs + ) + dtype = maybe_cast_result_dtype(orig_values.dtype, how) + assert isinstance(dtype, BaseMaskedDtype) + cls = dtype.construct_array_type() + + return cls(res_values.astype(dtype.type, copy=False), mask) + @final def _cython_operation( - self, kind: str, values, how: str, axis: int, min_count: int = -1, **kwargs + self, + kind: str, + values, + how: str, + axis: int, + min_count: int = -1, + mask: np.ndarray | None = None, + **kwargs, ) -> ArrayLike: """ Returns the values of a cython operation. @@ -645,10 +690,16 @@ def _cython_operation( # if not raise NotImplementedError cy_op.disallow_invalid_ops(dtype, is_numeric) + func_uses_mask = cy_op.uses_mask() if is_extension_array_dtype(dtype): - return self._ea_wrap_cython_operation( - kind, values, how, axis, min_count, **kwargs - ) + if isinstance(values, BaseMaskedArray) and func_uses_mask: + return self._masked_ea_wrap_cython_operation( + kind, values, how, axis, min_count, **kwargs + ) + else: + return self._ea_wrap_cython_operation( + kind, values, how, axis, min_count, **kwargs + ) elif values.ndim == 1: # expand to 2d, dispatch, then squeeze if appropriate @@ -659,6 +710,7 @@ def _cython_operation( how=how, axis=1, min_count=min_count, + mask=mask, **kwargs, ) if res.shape[0] == 1: @@ -688,6 +740,9 @@ def _cython_operation( assert axis == 1 values = values.T + if mask is not None: + mask = mask.reshape(values.shape, order="C") + out_shape = cy_op.get_output_shape(ngroups, values) func, values = cy_op.get_cython_func_and_vals(values, is_numeric) out_dtype = cy_op.get_out_dtype(values.dtype) @@ -708,7 +763,18 @@ def _cython_operation( func(result, counts, values, comp_ids, min_count) elif kind == "transform": # TODO: min_count - func(result, values, comp_ids, ngroups, 
is_datetimelike, **kwargs) + if func_uses_mask: + func( + result, + values, + comp_ids, + ngroups, + is_datetimelike, + mask=mask, + **kwargs, + ) + else: + func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs) if kind == "aggregate": # i.e. counts is defined. Locations where count<min_count diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 46985ff956788..f47fc1f4e4a4f 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -22,20 +22,31 @@ @pytest.fixture( - params=[np.int32, np.int64, np.float32, np.float64], - ids=["np.int32", "np.int64", "np.float32", "np.float64"], + params=[np.int32, np.int64, np.float32, np.float64, "Int64", "Float64"], + ids=["np.int32", "np.int64", "np.float32", "np.float64", "Int64", "Float64"], ) -def numpy_dtypes_for_minmax(request): +def dtypes_for_minmax(request): """ - Fixture of numpy dtypes with min and max values used for testing + Fixture of dtypes with min and max values used for testing cummin and cummax """ dtype = request.param + + np_type = dtype + if dtype == "Int64": + np_type = np.int64 + elif dtype == "Float64": + np_type = np.float64 + min_val = ( - np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min + np.iinfo(np_type).min + if np.dtype(np_type).kind == "i" + else np.finfo(np_type).min ) max_val = ( - np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max + np.iinfo(np_type).max + if np.dtype(np_type).kind == "i" + else np.finfo(np_type).max ) return (dtype, min_val, max_val) @@ -727,9 +738,9 @@ def test_numpy_compat(func): getattr(g, func)(foo=1) -def test_cummin(numpy_dtypes_for_minmax): - dtype = numpy_dtypes_for_minmax[0] - min_val = numpy_dtypes_for_minmax[1] +def test_cummin(dtypes_for_minmax): + dtype = dtypes_for_minmax[0] + min_val = dtypes_for_minmax[1] # GH 15048 base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}) @@ -775,19 +786,24 
@@ def test_cummin(numpy_dtypes_for_minmax): tm.assert_series_equal(result, expected) -def test_cummin_all_nan_column(): +@pytest.mark.parametrize("method", ["cummin", "cummax"]) +@pytest.mark.parametrize("dtype", ["UInt64", "Int64", "Float64", "float", "boolean"]) +def test_cummin_max_all_nan_column(method, dtype): base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8}) + base_df["B"] = base_df["B"].astype(dtype) + grouped = base_df.groupby("A") - expected = DataFrame({"B": [np.nan] * 8}) - result = base_df.groupby("A").cummin() + expected = DataFrame({"B": [np.nan] * 8}, dtype=dtype) + result = getattr(grouped, method)() tm.assert_frame_equal(expected, result) - result = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame() + + result = getattr(grouped["B"], method)().to_frame() tm.assert_frame_equal(expected, result) -def test_cummax(numpy_dtypes_for_minmax): - dtype = numpy_dtypes_for_minmax[0] - max_val = numpy_dtypes_for_minmax[2] +def test_cummax(dtypes_for_minmax): + dtype = dtypes_for_minmax[0] + max_val = dtypes_for_minmax[2] # GH 15048 base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}) @@ -831,14 +847,20 @@ def test_cummax(numpy_dtypes_for_minmax): tm.assert_series_equal(result, expected) -def test_cummax_all_nan_column(): - base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8}) +@td.skip_if_32bit +@pytest.mark.parametrize("method", ["cummin", "cummax"]) +@pytest.mark.parametrize( + "dtype,val", [("UInt64", np.iinfo("uint64").max), ("Int64", 2 ** 53 + 1)] +) +def test_nullable_int_not_cast_as_float(method, dtype, val): + data = [val, pd.NA] + df = DataFrame({"grp": [1, 1], "b": data}, dtype=dtype) + grouped = df.groupby("grp") - expected = DataFrame({"B": [np.nan] * 8}) - result = base_df.groupby("A").cummax() - tm.assert_frame_equal(expected, result) - result = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame() - tm.assert_frame_equal(expected, result) + result = 
grouped.transform(method) + expected = DataFrame({"b": data}, dtype=dtype) + + tm.assert_frame_equal(result, expected) @pytest.mark.parametrize(
Potential first step towards #37493 ASV's (on `N = 5_000_000` instead of `N = 1_000_000` on the added benchmark to get more stable results): <details> ``` before after ratio [3a973b6d] [2fa80ad0] <master> <perf/masked_cummin/max> - 297±10ms 241±9ms 0.81 groupby.CumminMax.time_frame_transform('Float64', 'cummax') - 284±6ms 240±7ms 0.85 groupby.CumminMax.time_frame_transform('Float64', 'cummin') - 761±60ms 393±4ms 0.52 groupby.CumminMax.time_frame_transform('Int64', 'cummax') - 753±50ms 390±3ms 0.52 groupby.CumminMax.time_frame_transform('Int64', 'cummin') 481±20ms 474±10ms 0.99 groupby.CumminMax.time_frame_transform('float64', 'cummax') 461±10ms 459±10ms 1.00 groupby.CumminMax.time_frame_transform('float64', 'cummin') 706±40ms 732±50ms 1.04 groupby.CumminMax.time_frame_transform('int64', 'cummax') 730±20ms 674±20ms 0.92 groupby.CumminMax.time_frame_transform('int64', 'cummin') - 685±30ms 214±5ms 0.31 groupby.CumminMax.time_frame_transform_many_nulls('Float64', 'cummax') - 698±20ms 225±4ms 0.32 groupby.CumminMax.time_frame_transform_many_nulls('Float64', 'cummin') - 811±50ms 204±6ms 0.25 groupby.CumminMax.time_frame_transform_many_nulls('Int64', 'cummax') - 804±50ms 207±6ms 0.26 groupby.CumminMax.time_frame_transform_many_nulls('Int64', 'cummin') 444±20ms 447±4ms 1.01 groupby.CumminMax.time_frame_transform_many_nulls('float64', 'cummax') 447±20ms 459±20ms 1.03 groupby.CumminMax.time_frame_transform_many_nulls('float64', 'cummin') 1.41±0.1s 1.33±0.01s 0.94 groupby.CumminMax.time_frame_transform_many_nulls('int64', 'cummax') 1.41±0.09s 1.33±0s 0.94 groupby.CumminMax.time_frame_transform_many_nulls('int64', 'cummin') ``` </details>
https://api.github.com/repos/pandas-dev/pandas/pulls/40651
2021-03-27T00:28:20Z
2021-04-21T12:39:20Z
2021-04-21T12:39:20Z
2021-04-21T12:57:52Z
BUG: read_excel failed with empty rows after MultiIndex header
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 2b0b62ab7facf..96a702622b1dc 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -750,6 +750,7 @@ I/O - Bug in :func:`read_hdf` returning unexpected records when filtering on categorical string columns using ``where`` parameter (:issue:`39189`) - Bug in :func:`read_sas` raising ``ValueError`` when ``datetimes`` were null (:issue:`39725`) - Bug in :func:`read_excel` dropping empty values from single-column spreadsheets (:issue:`39808`) +- Bug in :func:`read_excel` raising ``AttributeError`` with ``MultiIndex`` header followed by two empty rows and no index, and bug affecting :func:`read_excel`, :func:`read_csv`, :func:`read_table`, :func:`read_fwf`, and :func:`read_clipboard` where one blank row after a ``MultiIndex`` header with no index would be dropped (:issue:`40442`) - Bug in :meth:`DataFrame.to_string` misplacing the truncation column when ``index=False`` (:issue:`40907`) Period diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 153ac4b5f0893..1a5ac31cc821b 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -707,7 +707,8 @@ cdef class TextReader: ic = (len(self.index_col) if self.index_col is not None else 0) - if lc != unnamed_count and lc - ic > unnamed_count: + # if wrong number of blanks or no index, not our format + if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0: hr -= 1 self.parser_start -= 1 this_header = [None] * lc diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 7eefd26b194ab..673c023325e3e 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -551,7 +551,11 @@ def parse( header_name, _ = pop_header_name(data[row], index_col) header_names.append(header_name) - has_index_names = is_list_like(header) and len(header) > 1 + # If there is a MultiIndex header and an index then there is also + # a row containing just the index name(s) + 
has_index_names = ( + is_list_like(header) and len(header) > 1 and index_col is not None + ) if is_list_like(index_col): # Forward fill values for MultiIndex index. diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index 37f553c724c9e..cbb0dd68ef038 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -431,7 +431,8 @@ def _infer_columns(self): ic = len(self.index_col) if self.index_col is not None else 0 unnamed_count = len(this_unnamed_cols) - if lc != unnamed_count and lc - ic > unnamed_count: + # if wrong number of blanks or no index, not our format + if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0: clear_buffer = False this_columns = [None] * lc self.buf = [self.buf[-1]] diff --git a/pandas/tests/io/data/excel/testmultiindex.ods b/pandas/tests/io/data/excel/testmultiindex.ods index deb88bdad1694..dca8d70abdc24 100644 Binary files a/pandas/tests/io/data/excel/testmultiindex.ods and b/pandas/tests/io/data/excel/testmultiindex.ods differ diff --git a/pandas/tests/io/data/excel/testmultiindex.xls b/pandas/tests/io/data/excel/testmultiindex.xls index 08dc78ea34d56..c91698be29b13 100644 Binary files a/pandas/tests/io/data/excel/testmultiindex.xls and b/pandas/tests/io/data/excel/testmultiindex.xls differ diff --git a/pandas/tests/io/data/excel/testmultiindex.xlsb b/pandas/tests/io/data/excel/testmultiindex.xlsb index f5f62d305640f..a693e0c66afc2 100644 Binary files a/pandas/tests/io/data/excel/testmultiindex.xlsb and b/pandas/tests/io/data/excel/testmultiindex.xlsb differ diff --git a/pandas/tests/io/data/excel/testmultiindex.xlsm b/pandas/tests/io/data/excel/testmultiindex.xlsm index 8bd16b016608c..5a2a4ea35f0d9 100644 Binary files a/pandas/tests/io/data/excel/testmultiindex.xlsm and b/pandas/tests/io/data/excel/testmultiindex.xlsm differ diff --git a/pandas/tests/io/data/excel/testmultiindex.xlsx b/pandas/tests/io/data/excel/testmultiindex.xlsx index 
56fc6f20b711a..a6174445bb83a 100644 Binary files a/pandas/tests/io/data/excel/testmultiindex.xlsx and b/pandas/tests/io/data/excel/testmultiindex.xlsx differ diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 382c8412ab050..c4b3221e1d3a7 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -1193,6 +1193,17 @@ def test_one_col_noskip_blank_line(self, read_ext): result = pd.read_excel(file_name) tm.assert_frame_equal(result, expected) + def test_multiheader_two_blank_lines(self, read_ext): + # GH 40442 + file_name = "testmultiindex" + read_ext + columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")]) + data = [[np.nan, np.nan], [np.nan, np.nan], [1, 3], [2, 4]] + expected = DataFrame(data, columns=columns) + result = pd.read_excel( + file_name, sheet_name="mi_column_empty_rows", header=[0, 1] + ) + tm.assert_frame_equal(result, expected) + class TestExcelFileRead: @pytest.fixture(autouse=True) diff --git a/pandas/tests/io/parser/test_header.py b/pandas/tests/io/parser/test_header.py index f15fc16fbce38..3b814360d3aa4 100644 --- a/pandas/tests/io/parser/test_header.py +++ b/pandas/tests/io/parser/test_header.py @@ -389,6 +389,17 @@ def test_header_multi_index_common_format_malformed3(all_parsers): tm.assert_frame_equal(expected, result) +def test_header_multi_index_blank_line(all_parsers): + # GH 40442 + parser = all_parsers + data = [[None, None], [1, 2], [3, 4]] + columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")]) + expected = DataFrame(data, columns=columns) + data = "a,b\nA,B\n,\n1,2\n3,4" + result = parser.read_csv(StringIO(data), header=[0, 1]) + tm.assert_frame_equal(expected, result) + + @pytest.mark.parametrize( "data,header", [("1,2,3\n4,5,6", None), ("foo,bar,baz\n1,2,3\n4,5,6", 0)] )
- [x] closes #40442 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Prior to this fix, a blank data row after a MultiIndex header was a interpreted as containing a blank index name, but that only works if the user has specified an index column. If index_col is None all subsequent rows should be treated as data, even if the first one is empty.
https://api.github.com/repos/pandas-dev/pandas/pulls/40649
2021-03-26T17:49:29Z
2021-04-23T00:07:08Z
2021-04-23T00:07:08Z
2021-04-23T17:23:05Z
Backport PR #40604: REGR: replace with multivalued regex raising
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index 45d131327630e..26d768f830830 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -18,6 +18,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.sum` when ``min_count`` greater than the :class:`DataFrame` shape was passed resulted in a ``ValueError`` (:issue:`39738`) - Fixed regression in :meth:`DataFrame.to_json` raising ``AttributeError`` when run on PyPy (:issue:`39837`) - Fixed regression in :meth:`DataFrame.where` not returning a copy in the case of an all True condition (:issue:`39595`) +- Fixed regression in :meth:`DataFrame.replace` raising ``IndexError`` when ``regex`` was a multi-key dictionary (:issue:`39338`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 5bc820e76bff1..b6bca855a9f05 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -894,10 +894,20 @@ def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray: rb = [self if inplace else self.copy()] for i, (src, dest) in enumerate(pairs): + convert = i == src_len # only convert once at the end new_rb: List["Block"] = [] - for blk in rb: - m = masks[i] - convert = i == src_len # only convert once at the end + + # GH-39338: _replace_coerce can split a block into + # single-column blocks, so track the index so we know + # where to index into the mask + for blk_num, blk in enumerate(rb): + if len(rb) == 1: + m = masks[i] + else: + mib = masks[i] + assert not isinstance(mib, bool) + m = mib[blk_num : blk_num + 1] + result = blk._replace_coerce( to_replace=src, value=dest, diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 1b570028964df..c4f2e09911b34 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -644,6 
+644,28 @@ def test_regex_replace_numeric_to_object_conversion(self, mix_abc): tm.assert_frame_equal(res, expec) assert res.a.dtype == np.object_ + @pytest.mark.parametrize( + "to_replace", [{"": np.nan, ",": ""}, {",": "", "": np.nan}] + ) + def test_joint_simple_replace_and_regex_replace(self, to_replace): + # GH-39338 + df = DataFrame( + { + "col1": ["1,000", "a", "3"], + "col2": ["a", "", "b"], + "col3": ["a", "b", "c"], + } + ) + result = df.replace(regex=to_replace) + expected = DataFrame( + { + "col1": ["1000", "a", "3"], + "col2": ["a", np.nan, "b"], + "col3": ["a", "b", "c"], + } + ) + tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize("metachar", ["[]", "()", r"\d", r"\w", r"\s"]) def test_replace_regex_metachar(self, metachar): df = DataFrame({"a": [metachar, "else"]})
Backport PR #40604
https://api.github.com/repos/pandas-dev/pandas/pulls/40648
2021-03-26T11:23:41Z
2021-03-26T13:04:25Z
2021-03-26T13:04:25Z
2021-03-26T13:18:07Z
TYP: fix ignores in core.groupby
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b407212fe6a50..74b79e82d033f 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -356,7 +356,7 @@ def _aggregate_multiple_funcs(self, arg): # TODO: index should not be Optional - see GH 35490 def _wrap_series_output( self, - output: Mapping[base.OutputKey, Union[Series, np.ndarray]], + output: Mapping[base.OutputKey, Union[Series, ArrayLike]], index: Optional[Index], ) -> FrameOrSeriesUnion: """ @@ -364,7 +364,7 @@ def _wrap_series_output( Parameters ---------- - output : Mapping[base.OutputKey, Union[Series, np.ndarray]] + output : Mapping[base.OutputKey, Union[Series, np.ndarray, ExtensionArray]] Data to wrap. index : pd.Index or None Index to apply to the output. @@ -421,14 +421,14 @@ def _wrap_aggregated_output( return self._reindex_output(result) def _wrap_transformed_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]] + self, output: Mapping[base.OutputKey, Union[Series, ArrayLike]] ) -> Series: """ Wraps the output of a SeriesGroupBy aggregation into the expected result. Parameters ---------- - output : dict[base.OutputKey, Union[Series, np.ndarray]] + output : dict[base.OutputKey, Union[Series, np.ndarray, ExtensionArray]] Dict with a sole key of 0 and a value of the result values. 
Returns @@ -1121,6 +1121,7 @@ def cast_agg_result(result, values: ArrayLike, how: str) -> ArrayLike: if isinstance(values, Categorical) and isinstance(result, np.ndarray): # If the Categorical op didn't raise, it is dtype-preserving + # We get here with how="first", "last", "min", "max" result = type(values)._from_sequence(result.ravel(), dtype=values.dtype) # Note this will have result.dtype == dtype from above @@ -1197,9 +1198,7 @@ def array_func(values: ArrayLike) -> ArrayLike: assert how == "ohlc" raise - # error: Incompatible types in assignment (expression has type - # "ExtensionArray", variable has type "ndarray") - result = py_fallback(values) # type: ignore[assignment] + result = py_fallback(values) return cast_agg_result(result, values, how) @@ -1755,14 +1754,14 @@ def _wrap_aggregated_output( return self._reindex_output(result) def _wrap_transformed_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]] + self, output: Mapping[base.OutputKey, Union[Series, ArrayLike]] ) -> DataFrame: """ Wraps the output of DataFrameGroupBy transformations into the expected result. Parameters ---------- - output : Mapping[base.OutputKey, Union[Series, np.ndarray]] + output : Mapping[base.OutputKey, Union[Series, np.ndarray, ExtensionArray]] Data to wrap. Returns diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index f33833193e4e0..51f41fb789258 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -84,7 +84,6 @@ class providing the base-class of operations. 
import pandas.core.algorithms as algorithms from pandas.core.arrays import ( Categorical, - DatetimeArray, ExtensionArray, ) from pandas.core.base import ( @@ -1026,7 +1025,7 @@ def _cumcount_array(self, ascending: bool = True): def _cython_transform( self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs ): - output: Dict[base.OutputKey, np.ndarray] = {} + output: Dict[base.OutputKey, ArrayLike] = {} for idx, obj in enumerate(self._iterate_slices()): name = obj.name @@ -1054,7 +1053,7 @@ def _wrap_aggregated_output( ): raise AbstractMethodError(self) - def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]): + def _wrap_transformed_output(self, output: Mapping[base.OutputKey, ArrayLike]): raise AbstractMethodError(self) def _wrap_applied_output(self, data, keys, values, not_indexed_same: bool = False): @@ -1099,7 +1098,7 @@ def _agg_general( def _cython_agg_general( self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1 ): - output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {} + output: Dict[base.OutputKey, ArrayLike] = {} # Ideally we would be able to enumerate self._iterate_slices and use # the index from enumeration as the key of output, but ohlc in particular # returns a (n x 4) array. Output requires 1D ndarrays as values, so we diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index c5d36d1588a5f..467c9948cd9ae 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -31,6 +31,7 @@ import pandas._libs.groupby as libgroupby import pandas._libs.reduction as libreduction from pandas._typing import ( + ArrayLike, DtypeObj, F, FrameOrSeries, @@ -525,7 +526,7 @@ def _disallow_invalid_ops( @final def _ea_wrap_cython_operation( self, kind: str, values, how: str, axis: int, min_count: int = -1, **kwargs - ) -> np.ndarray: + ) -> ArrayLike: """ If we have an ExtensionArray, unwrap, call _cython_operation, and re-wrap if appropriate. 
@@ -577,7 +578,7 @@ def _ea_wrap_cython_operation( @final def _cython_operation( self, kind: str, values, how: str, axis: int, min_count: int = -1, **kwargs - ) -> np.ndarray: + ) -> ArrayLike: """ Returns the values of a cython operation. """ @@ -684,11 +685,11 @@ def _cython_operation( # e.g. if we are int64 and need to restore to datetime64/timedelta64 # "rank" is the only member of cython_cast_blocklist we get here dtype = maybe_cast_result_dtype(orig_values.dtype, how) - # error: Incompatible types in assignment (expression has type - # "Union[ExtensionArray, ndarray]", variable has type "ndarray") - result = maybe_downcast_to_dtype(result, dtype) # type: ignore[assignment] + op_result = maybe_downcast_to_dtype(result, dtype) + else: + op_result = result - return result + return op_result @final def _aggregate( @@ -785,14 +786,10 @@ def _aggregate_series_pure_python(self, obj: Series, func: F): counts[label] = group.shape[0] result[label] = res - result = lib.maybe_convert_objects(result, try_float=False) - # error: Incompatible types in assignment (expression has type - # "Union[ExtensionArray, ndarray]", variable has type "ndarray") - result = maybe_cast_result( # type: ignore[assignment] - result, obj, numeric_only=True - ) + out = lib.maybe_convert_objects(result, try_float=False) + out = maybe_cast_result(out, obj, numeric_only=True) - return result, counts + return out, counts class BinGrouper(BaseGrouper):
cc @WillAyd
https://api.github.com/repos/pandas-dev/pandas/pulls/40643
2021-03-25T23:18:38Z
2021-03-26T20:20:21Z
2021-03-26T20:20:21Z
2021-03-26T22:01:33Z
TYP: IndexOpsMixin
diff --git a/pandas/core/base.py b/pandas/core/base.py index f30430dd394ca..18fc76fe79a5a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -24,6 +24,7 @@ DtypeObj, IndexLabel, Shape, + final, ) from pandas.compat import PYPY from pandas.compat.numpy import function as nv @@ -933,12 +934,8 @@ def _map_values(self, mapper, na_action=None): # use the built in categorical series mapper which saves # time by mapping the categories instead of all values - # error: Incompatible types in assignment (expression has type - # "Categorical", variable has type "IndexOpsMixin") - self = cast("Categorical", self) # type: ignore[assignment] - # error: Item "ExtensionArray" of "Union[ExtensionArray, Any]" has no - # attribute "map" - return self._values.map(mapper) # type: ignore[union-attr] + cat = cast("Categorical", self._values) + return cat.map(mapper) values = self._values @@ -955,8 +952,7 @@ def _map_values(self, mapper, na_action=None): raise NotImplementedError map_f = lambda values, f: values.map(f) else: - # error: "IndexOpsMixin" has no attribute "astype" - values = self.astype(object)._values # type: ignore[attr-defined] + values = self._values.astype(object) if na_action == "ignore": map_f = lambda values, f: lib.map_infer_mask( values, f, isna(values).view(np.uint8) @@ -1327,9 +1323,10 @@ def searchsorted(self, value, side="left", sorter=None) -> np.ndarray: return algorithms.searchsorted(self._values, value, side=side, sorter=sorter) def drop_duplicates(self, keep="first"): - duplicated = self.duplicated(keep=keep) + duplicated = self._duplicated(keep=keep) # error: Value of type "IndexOpsMixin" is not indexable return self[~duplicated] # type: ignore[index] - def duplicated(self, keep: Union[str, bool] = "first") -> np.ndarray: + @final + def _duplicated(self, keep: Union[str, bool] = "first") -> np.ndarray: return duplicated(self._values, keep=keep) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 
af3315dd2ade6..fc4eeebc86642 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2663,7 +2663,7 @@ def duplicated(self, keep: Union[str_t, bool] = "first") -> np.ndarray: Returns ------- - numpy.ndarray + np.ndarray[bool] See Also -------- @@ -2699,7 +2699,7 @@ def duplicated(self, keep: Union[str_t, bool] = "first") -> np.ndarray: if self.is_unique: # fastpath available bc we are immutable return np.zeros(len(self), dtype=bool) - return super().duplicated(keep=keep) + return self._duplicated(keep=keep) def _get_unique_index(self: _IndexT) -> _IndexT: """ diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 76878d0a0b82a..fedb955ce83b9 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1614,12 +1614,16 @@ def _inferred_type_levels(self) -> List[str]: return [i.inferred_type for i in self.levels] @doc(Index.duplicated) - def duplicated(self, keep="first"): + def duplicated(self, keep="first") -> np.ndarray: shape = map(len, self.levels) ids = get_group_index(self.codes, shape, sort=False, xnull=False) return duplicated_int64(ids, keep) + # error: Cannot override final attribute "_duplicated" + # (previously declared in base class "IndexOpsMixin") + _duplicated = duplicated # type: ignore[misc] + def fillna(self, value=None, downcast=None): """ fillna is not implemented for MultiIndex @@ -2216,11 +2220,7 @@ def drop(self, codes, level=None, errors="raise"): if not isinstance(codes, (np.ndarray, Index)): try: - # error: Argument "dtype" to "index_labels_to_array" has incompatible - # type "Type[object]"; expected "Union[str, dtype[Any], None]" - codes = com.index_labels_to_array( - codes, dtype=object # type: ignore[arg-type] - ) + codes = com.index_labels_to_array(codes, dtype=np.dtype("object")) except ValueError: pass diff --git a/pandas/core/series.py b/pandas/core/series.py index 27042f7de9dc1..641a57a554a9b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1874,7 
+1874,7 @@ def mode(self, dropna=True) -> Series: # TODO: Add option for bins like value_counts() return algorithms.mode(self, dropna=dropna) - def unique(self): + def unique(self) -> ArrayLike: """ Return unique values of Series object. @@ -2020,9 +2020,7 @@ def drop_duplicates(self, keep="first", inplace=False) -> Optional[Series]: else: return result - # error: Return type "Series" of "duplicated" incompatible with return type - # "ndarray" in supertype "IndexOpsMixin" - def duplicated(self, keep="first") -> Series: # type: ignore[override] + def duplicated(self, keep="first") -> Series: """ Indicate duplicate Series values. @@ -2043,7 +2041,7 @@ def duplicated(self, keep="first") -> Series: # type: ignore[override] Returns ------- - Series + Series[bool] Series indicating whether each value has occurred in the preceding values. @@ -2098,7 +2096,7 @@ def duplicated(self, keep="first") -> Series: # type: ignore[override] 4 True dtype: bool """ - res = base.IndexOpsMixin.duplicated(self, keep=keep) + res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index) return result.__finalize__(self, method="duplicated")
https://api.github.com/repos/pandas-dev/pandas/pulls/40642
2021-03-25T23:04:18Z
2021-03-30T12:56:56Z
2021-03-30T12:56:56Z
2021-11-20T23:23:35Z
TST: move Styler format tests to own module
diff --git a/pandas/tests/io/formats/style/test_format.py b/pandas/tests/io/formats/style/test_format.py new file mode 100644 index 0000000000000..09b18e1f71d76 --- /dev/null +++ b/pandas/tests/io/formats/style/test_format.py @@ -0,0 +1,199 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + IndexSlice, + NaT, + Timestamp, +) +import pandas._testing as tm + +pytest.importorskip("jinja2") +from pandas.io.formats.style import Styler + + +@pytest.fixture +def df(): + return DataFrame( + data=[[0, -0.609], [1, -1.228]], + columns=["A", "B"], + index=["x", "y"], + ) + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0) + + +def test_display_format(styler): + ctx = styler.format("{:0.1f}")._translate() + assert all(["display_value" in c for c in row] for row in ctx["body"]) + assert all([len(c["display_value"]) <= 3 for c in row[1:]] for row in ctx["body"]) + assert len(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3 + + +def test_format_dict(styler): + ctx = styler.format({"A": "{:0.1f}", "B": "{0:.2%}"})._translate() + assert ctx["body"][0][1]["display_value"] == "0.0" + assert ctx["body"][0][2]["display_value"] == "-60.90%" + + +def test_format_string(styler): + ctx = styler.format("{:.2f}")._translate() + assert ctx["body"][0][1]["display_value"] == "0.00" + assert ctx["body"][0][2]["display_value"] == "-0.61" + assert ctx["body"][1][1]["display_value"] == "1.00" + assert ctx["body"][1][2]["display_value"] == "-1.23" + + +def test_format_callable(styler): + ctx = styler.format(lambda v: "neg" if v < 0 else "pos")._translate() + assert ctx["body"][0][1]["display_value"] == "pos" + assert ctx["body"][0][2]["display_value"] == "neg" + assert ctx["body"][1][1]["display_value"] == "pos" + assert ctx["body"][1][2]["display_value"] == "neg" + + +def test_format_with_na_rep(): + # GH 21527 28358 + df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"]) + + ctx = df.style.format(None, na_rep="-")._translate() + assert 
ctx["body"][0][1]["display_value"] == "-" + assert ctx["body"][0][2]["display_value"] == "-" + + ctx = df.style.format("{:.2%}", na_rep="-")._translate() + assert ctx["body"][0][1]["display_value"] == "-" + assert ctx["body"][0][2]["display_value"] == "-" + assert ctx["body"][1][1]["display_value"] == "110.00%" + assert ctx["body"][1][2]["display_value"] == "120.00%" + + ctx = df.style.format("{:.2%}", na_rep="-", subset=["B"])._translate() + assert ctx["body"][0][2]["display_value"] == "-" + assert ctx["body"][1][2]["display_value"] == "120.00%" + + +def test_format_non_numeric_na(): + # GH 21527 28358 + df = DataFrame( + { + "object": [None, np.nan, "foo"], + "datetime": [None, NaT, Timestamp("20120101")], + } + ) + + with tm.assert_produces_warning(FutureWarning): + ctx = df.style.set_na_rep("NA")._translate() + assert ctx["body"][0][1]["display_value"] == "NA" + assert ctx["body"][0][2]["display_value"] == "NA" + assert ctx["body"][1][1]["display_value"] == "NA" + assert ctx["body"][1][2]["display_value"] == "NA" + + ctx = df.style.format(None, na_rep="-")._translate() + assert ctx["body"][0][1]["display_value"] == "-" + assert ctx["body"][0][2]["display_value"] == "-" + assert ctx["body"][1][1]["display_value"] == "-" + assert ctx["body"][1][2]["display_value"] == "-" + + +def test_format_clear(styler): + assert (0, 0) not in styler._display_funcs # using default + styler.format("{:.2f") + assert (0, 0) in styler._display_funcs # formatter is specified + styler.format() + assert (0, 0) not in styler._display_funcs # formatter cleared to default + + +def test_format_escape(): + df = DataFrame([['<>&"']]) + s = Styler(df, uuid_len=0).format("X&{0}>X", escape=False) + expected = '<td id="T__row0_col0" class="data row0 col0" >X&<>&">X</td>' + assert expected in s.render() + + # only the value should be escaped before passing to the formatter + s = Styler(df, uuid_len=0).format("X&{0}>X", escape=True) + ex = '<td id="T__row0_col0" class="data row0 col0" 
>X&&lt;&gt;&amp;&#34;>X</td>' + assert ex in s.render() + + +def test_format_escape_na_rep(): + # tests the na_rep is not escaped + df = DataFrame([['<>&"', None]]) + s = Styler(df, uuid_len=0).format("X&{0}>X", escape=True, na_rep="&") + ex = '<td id="T__row0_col0" class="data row0 col0" >X&&lt;&gt;&amp;&#34;>X</td>' + expected2 = '<td id="T__row0_col1" class="data row0 col1" >&</td>' + assert ex in s.render() + assert expected2 in s.render() + + +def test_format_escape_floats(styler): + # test given formatter for number format is not impacted by escape + s = styler.format("{:.1f}", escape=True) + for expected in [">0.0<", ">1.0<", ">-1.2<", ">-0.6<"]: + assert expected in s.render() + # tests precision of floats is not impacted by escape + s = styler.format(precision=1, escape=True) + for expected in [">0<", ">1<", ">-1.2<", ">-0.6<"]: + assert expected in s.render() + + +@pytest.mark.parametrize("formatter", [5, True, [2.0]]) +def test_format_raises(styler, formatter): + with pytest.raises(TypeError, match="expected str or callable"): + styler.format(formatter) + + +def test_format_with_precision(): + # Issue #13257 + df = DataFrame(data=[[1.0, 2.0090], [3.2121, 4.566]], columns=["a", "b"]) + s = Styler(df) + + ctx = s.format(precision=1)._translate() + assert ctx["body"][0][1]["display_value"] == "1.0" + assert ctx["body"][0][2]["display_value"] == "2.0" + assert ctx["body"][1][1]["display_value"] == "3.2" + assert ctx["body"][1][2]["display_value"] == "4.6" + + ctx = s.format(precision=2)._translate() + assert ctx["body"][0][1]["display_value"] == "1.00" + assert ctx["body"][0][2]["display_value"] == "2.01" + assert ctx["body"][1][1]["display_value"] == "3.21" + assert ctx["body"][1][2]["display_value"] == "4.57" + + ctx = s.format(precision=3)._translate() + assert ctx["body"][0][1]["display_value"] == "1.000" + assert ctx["body"][0][2]["display_value"] == "2.009" + assert ctx["body"][1][1]["display_value"] == "3.212" + assert 
ctx["body"][1][2]["display_value"] == "4.566" + + +def test_format_subset(): + df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"]) + ctx = df.style.format( + {"a": "{:0.1f}", "b": "{0:.2%}"}, subset=IndexSlice[0, :] + )._translate() + expected = "0.1" + raw_11 = "1.123400" + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == raw_11 + assert ctx["body"][0][2]["display_value"] == "12.34%" + + ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, :])._translate() + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == raw_11 + + ctx = df.style.format("{:0.1f}", subset=IndexSlice["a"])._translate() + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][0][2]["display_value"] == "0.123400" + + ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, "a"])._translate() + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == raw_11 + + ctx = df.style.format("{:0.1f}", subset=IndexSlice[[0, 1], ["a"]])._translate() + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == "1.1" + assert ctx["body"][0][2]["display_value"] == "0.123400" + assert ctx["body"][1][2]["display_value"] == raw_11 diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py index d5b6724fd15e6..302019b702829 100644 --- a/pandas/tests/io/formats/style/test_style.py +++ b/pandas/tests/io/formats/style/test_style.py @@ -575,24 +575,6 @@ def test_duplicate(self): ] assert result == expected - def test_format_with_na_rep(self): - # GH 21527 28358 - df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"]) - - ctx = df.style.format(None, na_rep="-")._translate() - assert ctx["body"][0][1]["display_value"] == "-" - assert ctx["body"][0][2]["display_value"] == "-" - - ctx = df.style.format("{:.2%}", na_rep="-")._translate() - assert 
ctx["body"][0][1]["display_value"] == "-" - assert ctx["body"][0][2]["display_value"] == "-" - assert ctx["body"][1][1]["display_value"] == "110.00%" - assert ctx["body"][1][2]["display_value"] == "120.00%" - - ctx = df.style.format("{:.2%}", na_rep="-", subset=["B"])._translate() - assert ctx["body"][0][2]["display_value"] == "-" - assert ctx["body"][1][2]["display_value"] == "120.00%" - def test_init_with_na_rep(self): # GH 21527 28358 df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"]) @@ -619,65 +601,6 @@ def test_set_na_rep(self): assert ctx["body"][0][1]["display_value"] == "NA" assert ctx["body"][0][2]["display_value"] == "-" - def test_format_non_numeric_na(self): - # GH 21527 28358 - df = DataFrame( - { - "object": [None, np.nan, "foo"], - "datetime": [None, pd.NaT, pd.Timestamp("20120101")], - } - ) - - with tm.assert_produces_warning(FutureWarning): - ctx = df.style.set_na_rep("NA")._translate() - assert ctx["body"][0][1]["display_value"] == "NA" - assert ctx["body"][0][2]["display_value"] == "NA" - assert ctx["body"][1][1]["display_value"] == "NA" - assert ctx["body"][1][2]["display_value"] == "NA" - - ctx = df.style.format(None, na_rep="-")._translate() - assert ctx["body"][0][1]["display_value"] == "-" - assert ctx["body"][0][2]["display_value"] == "-" - assert ctx["body"][1][1]["display_value"] == "-" - assert ctx["body"][1][2]["display_value"] == "-" - - def test_format_clear(self): - assert (0, 0) not in self.styler._display_funcs # using default - self.styler.format("{:.2f") - assert (0, 0) in self.styler._display_funcs # formatter is specified - self.styler.format() - assert (0, 0) not in self.styler._display_funcs # formatter cleared to default - - def test_format_escape(self): - df = DataFrame([['<>&"']]) - s = Styler(df, uuid_len=0).format("X&{0}>X", escape=False) - expected = '<td id="T__row0_col0" class="data row0 col0" >X&<>&">X</td>' - assert expected in s.render() - - # only the value should be escaped before passing to the 
formatter - s = Styler(df, uuid_len=0).format("X&{0}>X", escape=True) - ex = '<td id="T__row0_col0" class="data row0 col0" >X&&lt;&gt;&amp;&#34;>X</td>' - assert ex in s.render() - - def test_format_escape_na_rep(self): - # tests the na_rep is not escaped - df = DataFrame([['<>&"', None]]) - s = Styler(df, uuid_len=0).format("X&{0}>X", escape=True, na_rep="&") - ex = '<td id="T__row0_col0" class="data row0 col0" >X&&lt;&gt;&amp;&#34;>X</td>' - expected2 = '<td id="T__row0_col1" class="data row0 col1" >&</td>' - assert ex in s.render() - assert expected2 in s.render() - - def test_format_escape_floats(self): - # test given formatter for number format is not impacted by escape - s = self.df.style.format("{:.1f}", escape=True) - for expected in [">0.0<", ">1.0<", ">-1.2<", ">-0.6<"]: - assert expected in s.render() - # tests precision of floats is not impacted by escape - s = self.df.style.format(precision=1, escape=True) - for expected in [">0<", ">1<", ">-1.2<", ">-0.6<"]: - assert expected in s.render() - def test_nonunique_raises(self): df = DataFrame([[1, 2]], columns=["A", "A"]) msg = "style is not supported for non-unique indices." 
@@ -804,85 +727,6 @@ def test_export(self): assert style1._todo == style2._todo style2.render() - def test_display_format(self): - df = DataFrame(np.random.random(size=(2, 2))) - ctx = df.style.format("{:0.1f}")._translate() - - assert all(["display_value" in c for c in row] for row in ctx["body"]) - assert all( - [len(c["display_value"]) <= 3 for c in row[1:]] for row in ctx["body"] - ) - assert len(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3 - - @pytest.mark.parametrize("formatter", [5, True, [2.0]]) - def test_format_raises(self, formatter): - with pytest.raises(TypeError, match="expected str or callable"): - self.df.style.format(formatter) - - def test_format_with_precision(self): - # Issue #13257 - df = DataFrame(data=[[1.0, 2.0090], [3.2121, 4.566]], columns=["a", "b"]) - s = Styler(df) - - ctx = s.format(precision=1)._translate() - assert ctx["body"][0][1]["display_value"] == "1.0" - assert ctx["body"][0][2]["display_value"] == "2.0" - assert ctx["body"][1][1]["display_value"] == "3.2" - assert ctx["body"][1][2]["display_value"] == "4.6" - - ctx = s.format(precision=2)._translate() - assert ctx["body"][0][1]["display_value"] == "1.00" - assert ctx["body"][0][2]["display_value"] == "2.01" - assert ctx["body"][1][1]["display_value"] == "3.21" - assert ctx["body"][1][2]["display_value"] == "4.57" - - ctx = s.format(precision=3)._translate() - assert ctx["body"][0][1]["display_value"] == "1.000" - assert ctx["body"][0][2]["display_value"] == "2.009" - assert ctx["body"][1][1]["display_value"] == "3.212" - assert ctx["body"][1][2]["display_value"] == "4.566" - - def test_format_subset(self): - df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"]) - ctx = df.style.format( - {"a": "{:0.1f}", "b": "{0:.2%}"}, subset=pd.IndexSlice[0, :] - )._translate() - expected = "0.1" - raw_11 = "1.123400" - assert ctx["body"][0][1]["display_value"] == expected - assert ctx["body"][1][1]["display_value"] == raw_11 - assert 
ctx["body"][0][2]["display_value"] == "12.34%" - - ctx = df.style.format("{:0.1f}", subset=pd.IndexSlice[0, :])._translate() - assert ctx["body"][0][1]["display_value"] == expected - assert ctx["body"][1][1]["display_value"] == raw_11 - - ctx = df.style.format("{:0.1f}", subset=pd.IndexSlice["a"])._translate() - assert ctx["body"][0][1]["display_value"] == expected - assert ctx["body"][0][2]["display_value"] == "0.123400" - - ctx = df.style.format("{:0.1f}", subset=pd.IndexSlice[0, "a"])._translate() - assert ctx["body"][0][1]["display_value"] == expected - assert ctx["body"][1][1]["display_value"] == raw_11 - - ctx = df.style.format( - "{:0.1f}", subset=pd.IndexSlice[[0, 1], ["a"]] - )._translate() - assert ctx["body"][0][1]["display_value"] == expected - assert ctx["body"][1][1]["display_value"] == "1.1" - assert ctx["body"][0][2]["display_value"] == "0.123400" - assert ctx["body"][1][2]["display_value"] == raw_11 - - def test_format_dict(self): - df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"]) - ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"})._translate() - assert ctx["body"][0][1]["display_value"] == "0.1" - assert ctx["body"][0][2]["display_value"] == "12.34%" - df["c"] = ["aaa", "bbb"] - ctx = df.style.format({"a": "{:0.1f}", "c": str.upper})._translate() - assert ctx["body"][0][1]["display_value"] == "0.1" - assert ctx["body"][0][3]["display_value"] == "AAA" - def test_bad_apply_shape(self): df = DataFrame([[1, 2], [3, 4]]) msg = "returned the wrong shape"
The following tests moved from `style/test_style.py` to their own module `style/test_format.py`. def test_display_format(styler): def test_format_dict(styler): def test_format_string(styler): [added new] def test_format_callable(styler): [added new] def test_format_with_na_rep(): def test_format_non_numeric_na(): def test_format_clear(styler): def test_format_escape(): def test_format_escape_na_rep(): def test_format_escape_floats(styler): def test_format_raises(styler, formatter): def test_format_with_precision(): def test_format_subset(): The new module does not use `class` setup structure.
https://api.github.com/repos/pandas-dev/pandas/pulls/40641
2021-03-25T22:08:41Z
2021-03-26T13:08:09Z
2021-03-26T13:08:09Z
2021-03-27T07:14:53Z
DOC: release notes for 1.3.0 Styler
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 02c2228f013a9..6ea3e94f3df08 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -110,6 +110,31 @@ both XPath 1.0 and XSLT 1.0 is available. (:issue:`27554`) For more, see :ref:`io.xml` in the user guide on IO tools. +Styler Upgrades +^^^^^^^^^^^^^^^ + +We provided some focused development on :class:`.Styler`, including altering methods +to accept more universal CSS language for arguments, such as ``'color:red;'`` instead of +``[('color', 'red')]`` (:issue:`39564`). This is also added to the built-in methods +to allow custom CSS highlighting instead of default background coloring (:issue:`40242`). + +The :meth:`.Styler.apply` now consistently allows functions with ``ndarray`` output to +allow more flexible development of UDFs when ``axis`` is ``None`` ``0`` or ``1`` (:issue:`39393`). + +:meth:`.Styler.set_tooltips` is a new method that allows adding on hover tooltips to +enhance interactive displays (:issue:`35643`). :meth:`.Styler.set_td_classes`, which was recently +introduced in v1.2.0 (:issue:`36159`) to allow adding specific CSS classes to data cells, has +been made as performant as :meth:`.Styler.apply` and :meth:`.Styler.applymap` (:issue:`40453`), +if not more performant in some cases. The overall performance of HTML +render times has been considerably improved to +match :meth:`DataFrame.to_html` (:issue:`39952` :issue:`37792` :issue:`40425`). + +The :meth:`.Styler.format` has had upgrades to easily format missing data, +precision, and perform HTML escaping (:issue:`40437` :issue:`40134`). There have been numerous other bug fixes to +properly format HTML and eliminate some inconsistencies (:issue:`39942` :issue:`40356` :issue:`39807` :issue:`39889` :issue:`39627`) + +Documentation has also seen major revisions in light of new features (:issue:`39720` :issue:`39317` :issue:`40493`) + .. 
_whatsnew_130.dataframe_honors_copy_with_dict: DataFrame constructor honors ``copy=False`` with dict
Added a note to the enhancements section detailing some different upgrades to `Styler` for 1.3.0 If this is not standard or unnecessary just close this PR - I'm only including it in case it is helpful (it is not for self-publication), so I am completely indifferent! Hopefully there will be one or two more items to add before the end of May.
https://api.github.com/repos/pandas-dev/pandas/pulls/40637
2021-03-25T16:53:49Z
2021-04-02T21:37:23Z
2021-04-02T21:37:23Z
2021-04-03T06:09:46Z
add deltalake to eco system doc page
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index e72a9d86daeaf..47a17f5d0d666 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -368,6 +368,14 @@ far exceeding the performance of the native ``df.to_sql`` method. Internally, it Microsoft's BCP utility, but the complexity is fully abstracted away from the end user. Rigorously tested, it is a complete replacement for ``df.to_sql``. +`Deltalake <https://pypi.org/project/deltalake>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Deltalake python package lets you access tables stored in +`Delta Lake <https://delta.io/>`__ natively in Python without the need to use Spark or +JVM. It provides the ``delta_table.to_pyarrow_table().to_pandas()`` method to convert +any Delta table into Pandas dataframe. + .. _ecosystem.out-of-core:
- [x] relates to #35017 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/40636
2021-03-25T15:55:29Z
2021-03-30T02:27:23Z
2021-03-30T02:27:23Z
2021-06-11T03:36:09Z
TYP: libgroupby int64->intp
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 122a014604bf0..b5b90b4987a66 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -490,7 +490,7 @@ def nancorr_kendall(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1) -> ndarra int64_t total_discordant = 0 float64_t kendall_tau int64_t n_obs - const int64_t[:] labels_n + const intp_t[:] labels_n N, K = (<object>mat).shape @@ -499,7 +499,7 @@ def nancorr_kendall(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1) -> ndarra ranked_mat = np.empty((N, K), dtype=np.float64) # For compatibility when calling rank_1d - labels_n = np.zeros(N, dtype=np.int64) + labels_n = np.zeros(N, dtype=np.intp) for i in range(K): ranked_mat[:, i] = rank_1d(mat[:, i], labels_n) @@ -959,7 +959,7 @@ ctypedef fused rank_t: @cython.boundscheck(False) def rank_1d( ndarray[rank_t, ndim=1] values, - const int64_t[:] labels, + const intp_t[:] labels, ties_method="average", bint ascending=True, bint pct=False, @@ -971,7 +971,8 @@ def rank_1d( Parameters ---------- values : array of rank_t values to be ranked - labels : array containing unique label for each group, with its ordering + labels : np.ndarray[np.intp] + Array containing unique label for each group, with its ordering matching up to the corresponding record in `values`. 
If not called from a groupby operation, will be an array of 0's ties_method : {'average', 'min', 'max', 'first', 'dense'}, default diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 545d6a10232ab..7ddc087df9b11 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -105,7 +105,7 @@ cdef inline float64_t median_linear(float64_t* a, int n) nogil: def group_median_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels, + ndarray[intp_t] labels, Py_ssize_t min_count=-1): """ Only aggregates on axis=0 @@ -122,7 +122,7 @@ def group_median_float64(ndarray[float64_t, ndim=2] out, ngroups = len(counts) N, K = (<object>values).shape - indexer, _counts = groupsort_indexer(ensure_platform_int(labels), ngroups) + indexer, _counts = groupsort_indexer(labels, ngroups) counts[:] = _counts[1:] data = np.empty((K, N), dtype=np.float64) @@ -145,7 +145,7 @@ def group_median_float64(ndarray[float64_t, ndim=2] out, @cython.wraparound(False) def group_cumprod_float64(float64_t[:, ::1] out, const float64_t[:, :] values, - const int64_t[:] labels, + const intp_t[:] labels, int ngroups, bint is_datetimelike, bint skipna=True): @@ -158,7 +158,7 @@ def group_cumprod_float64(float64_t[:, ::1] out, Array to store cumprod in. values : float64 array Values to take cumprod of. - labels : int64 array + labels : np.ndarray[np.intp] Labels to group by. ngroups : int Number of groups, larger than all entries of `labels`. 
@@ -175,7 +175,7 @@ def group_cumprod_float64(float64_t[:, ::1] out, Py_ssize_t i, j, N, K, size float64_t val float64_t[:, ::1] accum - int64_t lab + intp_t lab N, K = (<object>values).shape accum = np.ones((ngroups, K), dtype=np.float64) @@ -202,7 +202,7 @@ def group_cumprod_float64(float64_t[:, ::1] out, @cython.wraparound(False) def group_cumsum(numeric[:, ::1] out, ndarray[numeric, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, int ngroups, is_datetimelike, bint skipna=True): @@ -215,7 +215,7 @@ def group_cumsum(numeric[:, ::1] out, Array to store cumsum in. values : array Values to take cumsum of. - labels : int64 array + labels : np.ndarray[np.intp] Labels to group by. ngroups : int Number of groups, larger than all entries of `labels`. @@ -232,7 +232,7 @@ def group_cumsum(numeric[:, ::1] out, Py_ssize_t i, j, N, K, size numeric val, y, t numeric[:, ::1] accum, compensation - int64_t lab + intp_t lab N, K = (<object>values).shape accum = np.zeros((ngroups, K), dtype=np.asarray(values).dtype) @@ -269,12 +269,12 @@ def group_cumsum(numeric[:, ::1] out, @cython.boundscheck(False) @cython.wraparound(False) -def group_shift_indexer(int64_t[::1] out, const int64_t[:] labels, +def group_shift_indexer(int64_t[::1] out, const intp_t[:] labels, int ngroups, int periods): cdef: - Py_ssize_t N, i, j, ii + Py_ssize_t N, i, j, ii, lab int offset = 0, sign - int64_t lab, idxer, idxer_slot + int64_t idxer, idxer_slot int64_t[::1] label_seen = np.zeros(ngroups, dtype=np.int64) int64_t[:, ::1] label_indexer @@ -321,7 +321,7 @@ def group_shift_indexer(int64_t[::1] out, const int64_t[:] labels, @cython.wraparound(False) @cython.boundscheck(False) -def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels, +def group_fillna_indexer(ndarray[int64_t] out, ndarray[intp_t] labels, ndarray[uint8_t] mask, object direction, int64_t limit, bint dropna): """ @@ -331,8 +331,9 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels, 
---------- out : array of int64_t values which this method will write its results to Missing values will be written to with a value of -1 - labels : array containing unique label for each group, with its ordering - matching up to the corresponding record in `values` + labels : np.ndarray[np.intp] + Array containing unique label for each group, with its ordering + matching up to the corresponding record in `values`. mask : array of int64_t values where a 1 indicates a missing value direction : {'ffill', 'bfill'} Direction for fill to be applied (forwards or backwards, respectively) @@ -344,9 +345,10 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels, This method modifies the `out` parameter rather than returning an object """ cdef: - Py_ssize_t i, N - int64_t[:] sorted_labels - int64_t idx, curr_fill_idx=-1, filled_vals=0 + Py_ssize_t i, N, idx + intp_t[:] sorted_labels + intp_t curr_fill_idx=-1 + int64_t filled_vals = 0 N = len(out) @@ -354,7 +356,7 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels, assert N == len(labels) == len(mask) sorted_labels = np.argsort(labels, kind='mergesort').astype( - np.int64, copy=False) + np.intp, copy=False) if direction == 'bfill': sorted_labels = sorted_labels[::-1] @@ -385,7 +387,7 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels, @cython.wraparound(False) def group_any_all(uint8_t[::1] out, const uint8_t[::1] values, - const int64_t[:] labels, + const intp_t[:] labels, const uint8_t[::1] mask, object val_test, bint skipna): @@ -395,7 +397,8 @@ def group_any_all(uint8_t[::1] out, Parameters ---------- out : array of values which this method will write its results to - labels : array containing unique label for each group, with its + labels : np.ndarray[np.intp] + Array containing unique label for each group, with its ordering matching up to the corresponding record in `values` values : array containing the truth value of each element mask : array indicating 
whether a value is na or not @@ -411,7 +414,7 @@ def group_any_all(uint8_t[::1] out, """ cdef: Py_ssize_t i, N = len(labels) - int64_t lab + intp_t lab uint8_t flag_val if val_test == 'all': @@ -455,7 +458,7 @@ ctypedef fused complexfloating_t: def _group_add(complexfloating_t[:, ::1] out, int64_t[::1] counts, ndarray[complexfloating_t, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, Py_ssize_t min_count=0): """ Only aggregates on axis=0 using Kahan summation @@ -514,7 +517,7 @@ group_add_complex128 = _group_add['double complex'] def _group_prod(floating[:, ::1] out, int64_t[::1] counts, ndarray[floating, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, Py_ssize_t min_count=0): """ Only aggregates on axis=0 @@ -567,7 +570,7 @@ group_prod_float64 = _group_prod['double'] def _group_var(floating[:, ::1] out, int64_t[::1] counts, ndarray[floating, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, Py_ssize_t min_count=-1, int64_t ddof=1): cdef: @@ -625,7 +628,7 @@ group_var_float64 = _group_var['double'] def _group_mean(floating[:, ::1] out, int64_t[::1] counts, ndarray[floating, ndim=2] values, - const int64_t[::1] labels, + const intp_t[::1] labels, Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) @@ -681,7 +684,7 @@ group_mean_float64 = _group_mean['double'] def _group_ohlc(floating[:, ::1] out, int64_t[::1] counts, ndarray[floating, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, Py_ssize_t min_count=-1): """ Only aggregates on axis=0 @@ -732,7 +735,7 @@ group_ohlc_float64 = _group_ohlc['double'] @cython.wraparound(False) def group_quantile(ndarray[float64_t] out, ndarray[numeric, ndim=1] values, - ndarray[int64_t] labels, + ndarray[intp_t] labels, ndarray[uint8_t] mask, float64_t q, object interpolation): @@ -743,7 +746,7 @@ def group_quantile(ndarray[float64_t] out, ---------- out : ndarray Array of aggregated values that will be written to. 
- labels : ndarray + labels : ndarray[np.intp] Array containing the unique group labels. values : ndarray Array containing the values to apply the function against. @@ -758,7 +761,7 @@ def group_quantile(ndarray[float64_t] out, cdef: Py_ssize_t i, N=len(labels), ngroups, grp_sz, non_na_sz Py_ssize_t grp_start=0, idx=0 - int64_t lab + intp_t lab uint8_t interp float64_t q_idx, frac, val, next_val ndarray[int64_t] counts, non_na_counts, sort_arr @@ -875,7 +878,7 @@ cdef inline bint _treat_as_na(rank_t val, bint is_datetimelike) nogil: def group_last(rank_t[:, ::1] out, int64_t[::1] counts, ndarray[rank_t, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, Py_ssize_t min_count=-1): """ Only aggregates on axis=0 @@ -967,7 +970,7 @@ def group_last(rank_t[:, ::1] out, def group_nth(rank_t[:, ::1] out, int64_t[::1] counts, ndarray[rank_t, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, int64_t min_count=-1, int64_t rank=1 ): """ @@ -1059,7 +1062,7 @@ def group_nth(rank_t[:, ::1] out, @cython.wraparound(False) def group_rank(float64_t[:, ::1] out, ndarray[rank_t, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, int ngroups, bint is_datetimelike, object ties_method="average", bint ascending=True, bint pct=False, object na_option="keep"): @@ -1070,7 +1073,8 @@ def group_rank(float64_t[:, ::1] out, ---------- out : array of float64_t values which this method will write its results to values : array of rank_t values to be ranked - labels : array containing unique label for each group, with its ordering + labels : np.ndarray[np.intp] + Array containing unique label for each group, with its ordering matching up to the corresponding record in `values` ngroups : int This parameter is not used, is needed to match signatures of other @@ -1131,7 +1135,7 @@ ctypedef fused groupby_t: cdef group_min_max(groupby_t[:, ::1] out, int64_t[::1] counts, ndarray[groupby_t, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] 
labels, Py_ssize_t min_count=-1, bint compute_max=True): """ @@ -1145,7 +1149,7 @@ cdef group_min_max(groupby_t[:, ::1] out, Input as a zeroed array, populated by group sizes during algorithm values : array Values to find column-wise min/max of. - labels : int64 array + labels : np.ndarray[np.intp] Labels to group by. min_count : Py_ssize_t, default -1 The minimum number of non-NA group elements, NA result if threshold @@ -1230,7 +1234,7 @@ cdef group_min_max(groupby_t[:, ::1] out, def group_max(groupby_t[:, ::1] out, int64_t[::1] counts, ndarray[groupby_t, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, Py_ssize_t min_count=-1): """See group_min_max.__doc__""" group_min_max(out, counts, values, labels, min_count=min_count, compute_max=True) @@ -1241,7 +1245,7 @@ def group_max(groupby_t[:, ::1] out, def group_min(groupby_t[:, ::1] out, int64_t[::1] counts, ndarray[groupby_t, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, Py_ssize_t min_count=-1): """See group_min_max.__doc__""" group_min_max(out, counts, values, labels, min_count=min_count, compute_max=False) @@ -1251,7 +1255,7 @@ def group_min(groupby_t[:, ::1] out, @cython.wraparound(False) def group_cummin_max(groupby_t[:, ::1] out, ndarray[groupby_t, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, int ngroups, bint is_datetimelike, bint compute_max): @@ -1264,7 +1268,7 @@ def group_cummin_max(groupby_t[:, ::1] out, Array to store cummin/max in. values : array Values to take cummin/max of. - labels : int64 array + labels : np.ndarray[np.intp] Labels to group by. ngroups : int Number of groups, larger than all entries of `labels`. 
@@ -1282,7 +1286,7 @@ def group_cummin_max(groupby_t[:, ::1] out, Py_ssize_t i, j, N, K, size groupby_t val, mval ndarray[groupby_t, ndim=2] accum - int64_t lab + intp_t lab N, K = (<object>values).shape accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype) @@ -1319,7 +1323,7 @@ def group_cummin_max(groupby_t[:, ::1] out, @cython.wraparound(False) def group_cummin(groupby_t[:, ::1] out, ndarray[groupby_t, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, int ngroups, bint is_datetimelike): """See group_cummin_max.__doc__""" @@ -1330,7 +1334,7 @@ def group_cummin(groupby_t[:, ::1] out, @cython.wraparound(False) def group_cummax(groupby_t[:, ::1] out, ndarray[groupby_t, ndim=2] values, - const int64_t[:] labels, + const intp_t[:] labels, int ngroups, bint is_datetimelike): """See group_cummin_max.__doc__""" diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 9ef3c859633c2..94a4d586b4f13 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -852,7 +852,7 @@ def get_level_sorter( @cython.boundscheck(False) @cython.wraparound(False) def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask, - const int64_t[:] labels, + const intp_t[:] labels, Py_ssize_t max_bin, int axis): cdef: @@ -881,10 +881,10 @@ def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask, return counts -def generate_slices(const int64_t[:] labels, Py_ssize_t ngroups): +def generate_slices(const intp_t[:] labels, Py_ssize_t ngroups): cdef: Py_ssize_t i, group_size, n, start - int64_t lab + intp_t lab object slobj ndarray[int64_t] starts, ends @@ -910,7 +910,7 @@ def generate_slices(const int64_t[:] labels, Py_ssize_t ngroups): return starts, ends -def indices_fast(ndarray index, const int64_t[:] labels, list keys, +def indices_fast(ndarray[intp_t] index, const int64_t[:] labels, list keys, list sorted_labels) -> dict: """ Parameters diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 5649d1378cda3..9acff1cac305c 100644 --- 
a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -10,6 +10,7 @@ import numpy as np cimport numpy as cnp from numpy cimport ( int64_t, + intp_t, ndarray, ) @@ -200,7 +201,7 @@ cdef class SeriesGrouper(_BaseGrouper): ndarray arr, index, dummy_arr, dummy_index object f, labels, values, typ, ityp, name - def __init__(self, object series, object f, object labels, + def __init__(self, object series, object f, ndarray[intp_t] labels, Py_ssize_t ngroups): if len(series) == 0: @@ -228,7 +229,8 @@ cdef class SeriesGrouper(_BaseGrouper): cdef: # Define result to avoid UnboundLocalError ndarray arr, result = None - ndarray[int64_t] labels, counts + ndarray[intp_t] labels + ndarray[int64_t] counts Py_ssize_t i, n, group_size, lab, start, end object res bint initialized = 0 diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 0ad1b4da03c70..77b5a0148905e 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1035,7 +1035,7 @@ def rank( values = _get_values_for_rank(values) ranks = algos.rank_1d( values, - labels=np.zeros(len(values), dtype=np.int64), + labels=np.zeros(len(values), dtype=np.intp), ties_method=method, ascending=ascending, na_option=na_option, diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 32439af6db238..eab6bdb3b2d66 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -104,7 +104,6 @@ validate_numeric_casting, ) from pandas.core.dtypes.common import ( - ensure_int64, ensure_platform_int, infer_dtype_from_object, is_bool_dtype, @@ -9493,7 +9492,7 @@ def _count_level(self, level: Level, axis: int = 0, numeric_only: bool = False): level_name = count_axis._names[level] level_index = count_axis.levels[level]._rename(name=level_name) - level_codes = ensure_int64(count_axis.codes[level]) + level_codes = ensure_platform_int(count_axis.codes[level]) counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis) if axis == 1: diff --git a/pandas/core/groupby/generic.py 
b/pandas/core/groupby/generic.py index b407212fe6a50..142654bbffb18 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -56,7 +56,6 @@ ) from pandas.core.dtypes.common import ( ensure_int64, - ensure_platform_int, is_bool, is_categorical_dtype, is_dict_like, @@ -896,7 +895,6 @@ def count(self) -> Series: val = self.obj._values mask = (ids != -1) & ~isna(val) - ids = ensure_platform_int(ids) minlength = ngroups or 0 out = np.bincount(ids[mask], minlength=minlength) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index f33833193e4e0..76170d95b83df 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1140,24 +1140,31 @@ def _cython_agg_general( ) @final - def _transform_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs): - """ - Perform groupby transform routine with the numba engine. - - This routine mimics the data splitting routine of the DataSplitter class - to generate the indices of each group in the sorted data and then passes the - data and indices into a Numba jitted function. - """ + def _numba_prep(self, func, data): if not callable(func): raise NotImplementedError( "Numba engine can only be used with a single function." ) - group_keys = self.grouper._get_group_keys() labels, _, n_groups = self.grouper.group_info sorted_index = get_group_index_sorter(labels, n_groups) sorted_labels = labels.take(sorted_index) + sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() + starts, ends = lib.generate_slices(sorted_labels, n_groups) + return starts, ends, sorted_index, sorted_data + + @final + def _transform_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs): + """ + Perform groupby transform routine with the numba engine. + + This routine mimics the data splitting routine of the DataSplitter class + to generate the indices of each group in the sorted data and then passes the + data and indices into a Numba jitted function. 
+ """ + starts, ends, sorted_index, sorted_data = self._numba_prep(func, data) + group_keys = self.grouper._get_group_keys() numba_transform_func = numba_.generate_numba_transform_func( tuple(args), kwargs, func, engine_kwargs @@ -1183,16 +1190,8 @@ def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) to generate the indices of each group in the sorted data and then passes the data and indices into a Numba jitted function. """ - if not callable(func): - raise NotImplementedError( - "Numba engine can only be used with a single function." - ) + starts, ends, sorted_index, sorted_data = self._numba_prep(func, data) group_keys = self.grouper._get_group_keys() - labels, _, n_groups = self.grouper.group_info - sorted_index = get_group_index_sorter(labels, n_groups) - sorted_labels = labels.take(sorted_index) - sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() - starts, ends = lib.generate_slices(sorted_labels, n_groups) numba_agg_func = numba_.generate_numba_agg_func( tuple(args), kwargs, func, engine_kwargs @@ -2425,7 +2424,9 @@ def ngroup(self, ascending: bool = True): """ with group_selection_context(self): index = self._selected_obj.index - result = self._obj_1d_constructor(self.grouper.group_info[0], index) + result = self._obj_1d_constructor( + self.grouper.group_info[0], index, dtype=np.int64 + ) if not ascending: result = self.ngroups - 1 - result return result diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index c5d36d1588a5f..60f7c54cfeaaf 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -353,7 +353,6 @@ def size(self) -> Series: Compute group sizes. 
""" ids, _, ngroup = self.group_info - ids = ensure_platform_int(ids) if ngroup: out = np.bincount(ids[ids != -1], minlength=ngroup) else: @@ -381,7 +380,7 @@ def group_info(self): comp_ids, obs_group_ids = self._get_compressed_codes() ngroups = len(obs_group_ids) - comp_ids = ensure_int64(comp_ids) + comp_ids = ensure_platform_int(comp_ids) return comp_ids, obs_group_ids, ngroups @final @@ -707,7 +706,7 @@ def _transform( self, result, values, comp_ids, transform_func, is_datetimelike: bool, **kwargs ): - comp_ids, _, ngroups = self.group_info + _, _, ngroups = self.group_info transform_func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs) return result @@ -918,7 +917,7 @@ def group_info(self): comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep) return ( - comp_ids.astype("int64", copy=False), + ensure_platform_int(comp_ids), obs_group_ids.astype("int64", copy=False), ngroups, ) @@ -981,14 +980,14 @@ def _is_indexed_like(obj, axes, axis: int) -> bool: class DataSplitter(Generic[FrameOrSeries]): def __init__(self, data: FrameOrSeries, labels, ngroups: int, axis: int = 0): self.data = data - self.labels = ensure_int64(labels) + self.labels = ensure_platform_int(labels) # _should_ already be np.intp self.ngroups = ngroups self.axis = axis assert isinstance(axis, int), axis @cache_readonly - def slabels(self) -> np.ndarray: + def slabels(self) -> np.ndarray: # np.ndarray[np.intp] # Sorted labels return self.labels.take(self._sort_idx) diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 02c41538ca123..856bd7d159c71 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -237,6 +237,7 @@ def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull: bool): Parameters ---------- + comp_ids : np.ndarray[np.intp] xnull : bool If nulls are excluded; i.e. -1 labels are passed through. 
""" @@ -249,7 +250,8 @@ def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull: bool): out = decons_group_index(obs_ids, shape) return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)] - i = unique_label_indices(comp_ids) + # TODO: unique_label_indices only used here, should take ndarray[np.intp] + i = unique_label_indices(ensure_int64(comp_ids)) i8copy = lambda a: a.astype("i8", subok=False, copy=True) return [i8copy(lab[i]) for lab in labels] @@ -517,7 +519,7 @@ def ensure_key_mapped(values, key: Optional[Callable], levels=None): def get_flattened_list( - comp_ids: np.ndarray, + comp_ids: np.ndarray, # np.ndarray[np.intp] ngroups: int, levels: Iterable[Index], labels: Iterable[np.ndarray], @@ -584,7 +586,7 @@ def get_group_index_sorter( Parameters ---------- - group_index : np.ndarray + group_index : np.ndarray[np.intp] signed integer dtype ngroups : int or None, default None diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index bb541739c7f44..5fcf4a73479a5 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -15,7 +15,7 @@ def test_series_grouper(): obj = Series(np.random.randn(10)) - labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) + labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.intp) grouper = libreduction.SeriesGrouper(obj, np.mean, labels, 2) result, counts = grouper.get_result() @@ -31,7 +31,7 @@ def test_series_grouper_requires_nonempty_raises(): # GH#29500 obj = Series(np.random.randn(10)) dummy = obj.iloc[:0] - labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) + labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.intp) with pytest.raises(ValueError, match="SeriesGrouper requires non-empty `series`"): libreduction.SeriesGrouper(dummy, np.mean, labels, 2) diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 
2d10dd8d18dc1..3d02e784d83b0 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -663,7 +663,7 @@ def test_groupby_empty(self): # check group properties assert len(gr.grouper.groupings) == 1 tm.assert_numpy_array_equal( - gr.grouper.group_info[0], np.array([], dtype=np.dtype("int64")) + gr.grouper.group_info[0], np.array([], dtype=np.dtype(np.intp)) ) tm.assert_numpy_array_equal( diff --git a/pandas/tests/groupby/test_libgroupby.py b/pandas/tests/groupby/test_libgroupby.py index 30b0edf8a139e..febc12edf0b32 100644 --- a/pandas/tests/groupby/test_libgroupby.py +++ b/pandas/tests/groupby/test_libgroupby.py @@ -8,7 +8,7 @@ group_var_float64, ) -from pandas.core.dtypes.common import ensure_int64 +from pandas.core.dtypes.common import ensure_platform_int from pandas import isna import pandas._testing as tm @@ -21,7 +21,7 @@ def test_group_var_generic_1d(self): out = (np.nan * np.ones((5, 1))).astype(self.dtype) counts = np.zeros(5, dtype="int64") values = 10 * prng.rand(15, 1).astype(self.dtype) - labels = np.tile(np.arange(5), (3,)).astype("int64") + labels = np.tile(np.arange(5), (3,)).astype("intp") expected_out = ( np.squeeze(values).reshape((5, 3), order="F").std(axis=1, ddof=1) ** 2 @@ -38,7 +38,7 @@ def test_group_var_generic_1d_flat_labels(self): out = (np.nan * np.ones((1, 1))).astype(self.dtype) counts = np.zeros(1, dtype="int64") values = 10 * prng.rand(5, 1).astype(self.dtype) - labels = np.zeros(5, dtype="int64") + labels = np.zeros(5, dtype="intp") expected_out = np.array([[values.std(ddof=1) ** 2]]) expected_counts = counts + 5 @@ -54,7 +54,7 @@ def test_group_var_generic_2d_all_finite(self): out = (np.nan * np.ones((5, 2))).astype(self.dtype) counts = np.zeros(5, dtype="int64") values = 10 * prng.rand(10, 2).astype(self.dtype) - labels = np.tile(np.arange(5), (2,)).astype("int64") + labels = np.tile(np.arange(5), (2,)).astype("intp") expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2 
expected_counts = counts + 2 @@ -70,7 +70,7 @@ def test_group_var_generic_2d_some_nan(self): counts = np.zeros(5, dtype="int64") values = 10 * prng.rand(10, 2).astype(self.dtype) values[:, 1] = np.nan - labels = np.tile(np.arange(5), (2,)).astype("int64") + labels = np.tile(np.arange(5), (2,)).astype("intp") expected_out = np.vstack( [ @@ -90,7 +90,7 @@ def test_group_var_constant(self): out = np.array([[np.nan]], dtype=self.dtype) counts = np.array([0], dtype="int64") values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype) - labels = np.zeros(3, dtype="int64") + labels = np.zeros(3, dtype="intp") self.algo(out, counts, values, labels) @@ -113,7 +113,7 @@ def test_group_var_large_inputs(self): counts = np.array([0], dtype="int64") values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype) values.shape = (10 ** 6, 1) - labels = np.zeros(10 ** 6, dtype="int64") + labels = np.zeros(10 ** 6, dtype="intp") self.algo(out, counts, values, labels) @@ -136,7 +136,7 @@ def _check(dtype): bins = np.array([6, 12, 20]) out = np.zeros((3, 4), dtype) counts = np.zeros(len(out), dtype=np.int64) - labels = ensure_int64(np.repeat(np.arange(3), np.diff(np.r_[0, bins]))) + labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins]))) func = getattr(libgroupby, f"group_ohlc_{dtype}") func(out, counts, obj[:, None], labels) @@ -178,7 +178,7 @@ def _check_cython_group_transform_cumulative(pd_op, np_op, dtype): data = np.array([[1], [2], [3], [4]], dtype=dtype) answer = np.zeros_like(data) - labels = np.array([0, 0, 0, 0], dtype=np.int64) + labels = np.array([0, 0, 0, 0], dtype=np.intp) ngroups = 1 pd_op(answer, data, labels, ngroups, is_datetimelike) @@ -204,7 +204,7 @@ def test_cython_group_transform_algos(): is_datetimelike = False # with nans - labels = np.array([0, 0, 0, 0, 0], dtype=np.int64) + labels = np.array([0, 0, 0, 0, 0], dtype=np.intp) ngroups = 1 data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64") diff --git a/pandas/tests/test_algos.py 
b/pandas/tests/test_algos.py index cd800b3f3a452..b9d7e59ea9716 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1712,7 +1712,7 @@ def test_quantile(): def test_unique_label_indices(): - a = np.random.randint(1, 1 << 10, 1 << 15).astype("i8") + a = np.random.randint(1, 1 << 10, 1 << 15).astype("int64") left = ht.unique_label_indices(a) right = np.unique(a, return_index=True)[1] @@ -1733,7 +1733,7 @@ def test_scipy_compat(self): def _check(arr): mask = ~np.isfinite(arr) arr = arr.copy() - result = libalgos.rank_1d(arr, labels=np.zeros(len(arr), dtype=np.int64)) + result = libalgos.rank_1d(arr, labels=np.zeros(len(arr), dtype=np.intp)) arr[mask] = np.inf exp = rankdata(arr) exp[mask] = np.nan
@realead pushing this any further requires getting a HashTable for intp_t. Would that be difficult?
https://api.github.com/repos/pandas-dev/pandas/pulls/40635
2021-03-25T15:45:30Z
2021-03-26T13:10:00Z
2021-03-26T13:10:00Z
2021-03-26T15:48:25Z
Backport PR #40592: REGR: where not copying on no-op
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index c7bc337239faf..45d131327630e 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -17,6 +17,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.sum` when ``min_count`` greater than the :class:`DataFrame` shape was passed resulted in a ``ValueError`` (:issue:`39738`) - Fixed regression in :meth:`DataFrame.to_json` raising ``AttributeError`` when run on PyPy (:issue:`39837`) +- Fixed regression in :meth:`DataFrame.where` not returning a copy in the case of an all True condition (:issue:`39595`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 99218cebc37e1..5bc820e76bff1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1458,7 +1458,7 @@ def where( raise ValueError("where must have a condition that is ndarray like") if cond.ravel("K").all(): - result = values + result = values.copy() else: # see if we can operate on the entire block, or need item-by-item # or if we are a single block (ndim == 1) diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index acdb5726e4adb..2b3c6010bf633 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -653,3 +653,20 @@ def test_where_categorical_filtering(self): expected.loc[0, :] = np.nan tm.assert_equal(result, expected) + + +def test_where_copies_with_noop(frame_or_series): + # GH-39595 + result = frame_or_series([1, 2, 3, 4]) + expected = result.copy() + col = result[0] if frame_or_series is DataFrame else result + + where_res = result.where(col < 5) + where_res *= 2 + + tm.assert_equal(result, expected) + + where_res = result.where(col > 5, [1, 2, 3, 4]) + where_res *= 2 + + tm.assert_equal(result, expected)
Backport PR #40592
https://api.github.com/repos/pandas-dev/pandas/pulls/40634
2021-03-25T13:38:52Z
2021-03-25T15:23:25Z
2021-03-25T15:23:25Z
2021-03-25T15:23:29Z
Backport PR #40457: CI: fix failing script/tests
diff --git a/Makefile b/Makefile index 2c968234749f5..ae1b082626629 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY : develop build clean clean_pyc doc lint-diff black +.PHONY : develop build clean clean_pyc doc lint-diff black test-scripts all: develop @@ -38,3 +38,6 @@ check: --included-file-extensions="py" \ --excluded-file-paths=pandas/tests,asv_bench/,doc/ pandas/ + +test-scripts: + pytest scripts diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/scripts/tests/test_inconsistent_namespace_check.py b/scripts/tests/test_inconsistent_namespace_check.py index 37e6d288d9341..10cb3042dfacb 100644 --- a/scripts/tests/test_inconsistent_namespace_check.py +++ b/scripts/tests/test_inconsistent_namespace_check.py @@ -2,7 +2,7 @@ import pytest -from scripts.check_for_inconsistent_pandas_namespace import main +from ..check_for_inconsistent_pandas_namespace import main BAD_FILE_0 = "cat_0 = Categorical()\ncat_1 = pd.Categorical()" BAD_FILE_1 = "cat_0 = pd.Categorical()\ncat_1 = Categorical()" diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index 74819db7b878c..7e4c68ddc183b 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -2,7 +2,8 @@ import textwrap import pytest -import validate_docstrings + +from .. 
import validate_docstrings class BadDocstrings: @@ -162,7 +163,9 @@ def test_bad_class(self, capsys): ( "BadDocstrings", "indentation_is_not_a_multiple_of_four", - ("flake8 error: E111 indentation is not a multiple of four",), + # with flake8 3.9.0, the message ends with four spaces, + # whereas in earlier versions, it ended with "four" + ("flake8 error: E111 indentation is not a multiple of ",), ), ( "BadDocstrings", diff --git a/scripts/tests/test_validate_unwanted_patterns.py b/scripts/tests/test_validate_unwanted_patterns.py index 947666a730ee9..ef93fd1d21981 100644 --- a/scripts/tests/test_validate_unwanted_patterns.py +++ b/scripts/tests/test_validate_unwanted_patterns.py @@ -1,7 +1,8 @@ import io import pytest -import validate_unwanted_patterns + +from .. import validate_unwanted_patterns class TestBarePytestRaises:
Backport PR #40457
https://api.github.com/repos/pandas-dev/pandas/pulls/40633
2021-03-25T11:21:11Z
2021-03-25T12:13:47Z
2021-03-25T12:13:47Z
2021-03-25T12:13:52Z
CI: fix pre-commit on 1.2.x
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 90d65327ea980..31c926233d5b6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -164,6 +164,7 @@ repos: rev: v1.2.2 hooks: - id: yesqa + additional_dependencies: [flake8==3.8.4] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v3.3.0 hooks:
https://github.com/pandas-dev/pandas/pull/40481#issuecomment-806522323
https://api.github.com/repos/pandas-dev/pandas/pulls/40632
2021-03-25T10:40:12Z
2021-03-25T11:53:56Z
2021-03-25T11:53:56Z
2021-03-25T11:54:00Z
ENH: New boundary inputs
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index b92e414f2055e..cb8df16d6c0fb 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -276,6 +276,7 @@ Other enhancements - Add keyword ``dropna`` to :meth:`DataFrame.value_counts` to allow counting rows that include ``NA`` values (:issue:`41325`) - :meth:`Series.replace` will now cast results to ``PeriodDtype`` where possible instead of ``object`` dtype (:issue:`41526`) - Improved error message in ``corr`` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) +- :meth:`Series.between` can now accept ``left`` or ``right`` as arguments to ``inclusive`` to include only the left or right boundary (:issue:`40245`) - :meth:`DataFrame.explode` now supports exploding multiple columns. Its ``column`` argument now also accepts a list of str or tuples for exploding on multiple columns at the same time (:issue:`39240`) .. --------------------------------------------------------------------------- @@ -838,6 +839,7 @@ Other Deprecations - Deprecated inference of ``timedelta64[ns]``, ``datetime64[ns]``, or ``DatetimeTZDtype`` dtypes in :class:`Series` construction when data containing strings is passed and no ``dtype`` is passed (:issue:`33558`) - In a future version, constructing :class:`Series` or :class:`DataFrame` with ``datetime64[ns]`` data and ``DatetimeTZDtype`` will treat the data as wall-times instead of as UTC times (matching DatetimeIndex behavior). 
To treat the data as UTC times, use ``pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)`` or ``pd.Series(data.view("int64"), dtype=dtype)`` (:issue:`33401`) - Deprecated passing lists as ``key`` to :meth:`DataFrame.xs` and :meth:`Series.xs` (:issue:`41760`) +- Deprecated boolean arguments of ``inclusive`` in :meth:`Series.between` to have ``{"left", "right", "neither", "both"}`` as standard argument values (:issue:`40628`) - Deprecated passing arguments as positional for all of the following, with exceptions noted (:issue:`41485`): - :func:`concat` (other than ``objs``) diff --git a/pandas/core/series.py b/pandas/core/series.py index c20e09e9eac5d..928a6807a1f9f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4965,7 +4965,7 @@ def isin(self, values) -> Series: self, method="isin" ) - def between(self, left, right, inclusive=True) -> Series: + def between(self, left, right, inclusive="both") -> Series: """ Return boolean Series equivalent to left <= series <= right. @@ -4979,8 +4979,9 @@ def between(self, left, right, inclusive=True) -> Series: Left boundary. right : scalar or list-like Right boundary. - inclusive : bool, default True - Include boundaries. + inclusive : {"both", "neither", "left", "right"} + Include boundaries. Whether to set each bound as closed or open. + .. 
versionchanged:: 1.3.0 Returns ------- @@ -5031,12 +5032,34 @@ def between(self, left, right, inclusive=True) -> Series: 3 False dtype: bool """ - if inclusive: + if inclusive is True or inclusive is False: + warnings.warn( + "Boolean inputs to the `inclusive` argument are deprecated in" + "favour of `both` or `neither`.", + FutureWarning, + stacklevel=2, + ) + if inclusive: + inclusive = "both" + else: + inclusive = "neither" + if inclusive == "both": lmask = self >= left rmask = self <= right - else: + elif inclusive == "left": + lmask = self >= left + rmask = self < right + elif inclusive == "right": + lmask = self > left + rmask = self <= right + elif inclusive == "neither": lmask = self > left rmask = self < right + else: + raise ValueError( + "Inclusive has to be either string of 'both'," + "'left', 'right', or 'neither'." + ) return lmask & rmask diff --git a/pandas/tests/series/methods/test_between.py b/pandas/tests/series/methods/test_between.py index 381c733619c6b..9c11b71e4bee6 100644 --- a/pandas/tests/series/methods/test_between.py +++ b/pandas/tests/series/methods/test_between.py @@ -1,4 +1,5 @@ import numpy as np +import pytest from pandas import ( Series, @@ -28,7 +29,7 @@ def test_between_datetime_values(self): expected = ser[3:18].dropna() tm.assert_series_equal(result, expected) - result = ser[ser.between(ser[3], ser[17], inclusive=False)] + result = ser[ser.between(ser[3], ser[17], inclusive="neither")] expected = ser[5:16].dropna() tm.assert_series_equal(result, expected) @@ -38,3 +39,48 @@ def test_between_period_values(self): result = ser.between(left, right) expected = (ser >= left) & (ser <= right) tm.assert_series_equal(result, expected) + + def test_between_inclusive_string(self): # :issue:`40628` + series = Series(date_range("1/1/2000", periods=10)) + left, right = series[[2, 7]] + + result = series.between(left, right, inclusive="both") + expected = (series >= left) & (series <= right) + tm.assert_series_equal(result, expected) + + 
result = series.between(left, right, inclusive="left") + expected = (series >= left) & (series < right) + tm.assert_series_equal(result, expected) + + result = series.between(left, right, inclusive="right") + expected = (series > left) & (series <= right) + tm.assert_series_equal(result, expected) + + result = series.between(left, right, inclusive="neither") + expected = (series > left) & (series < right) + tm.assert_series_equal(result, expected) + + def test_between_error_args(self): # :issue:`40628` + series = Series(date_range("1/1/2000", periods=10)) + left, right = series[[2, 7]] + + value_error_msg = ( + "Inclusive has to be either string of 'both'," + "'left', 'right', or 'neither'." + ) + + with pytest.raises(ValueError, match=value_error_msg): + series = Series(date_range("1/1/2000", periods=10)) + series.between(left, right, inclusive="yes") + + def test_between_inclusive_warning(self): + series = Series(date_range("1/1/2000", periods=10)) + left, right = series[[2, 7]] + with tm.assert_produces_warning(FutureWarning): + result = series.between(left, right, inclusive=False) + expected = (series > left) & (series < right) + tm.assert_series_equal(result, expected) + with tm.assert_produces_warning(FutureWarning): + result = series.between(left, right, inclusive=True) + expected = (series >= left) & (series <= right) + tm.assert_series_equal(result, expected)
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] tests added / passed - [x] closes #40245 - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40628
2021-03-25T05:52:36Z
2021-06-25T09:23:15Z
2021-06-25T09:23:14Z
2022-10-29T01:47:19Z
REF: SingleBlockManager dont subclass BlockManager
diff --git a/pandas/_typing.py b/pandas/_typing.py index d6561176deb71..36441e620286d 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -185,7 +185,7 @@ ] # internals -Manager = Union["ArrayManager", "BlockManager"] +Manager = Union["ArrayManager", "BlockManager", "SingleBlockManager"] SingleManager = Union["SingleArrayManager", "SingleBlockManager"] # indexing diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 596993182a9c1..64ec0801b5d8a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -548,6 +548,7 @@ class DataFrame(NDFrame, OpsMixin): _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) + _mgr: BlockManager | ArrayManager @property def _constructor(self) -> type[DataFrame]: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 2c55f46ea3d62..cdf919923d21c 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -24,6 +24,7 @@ Dtype, DtypeObj, Shape, + type_t, ) from pandas.errors import PerformanceWarning from pandas.util._validators import validate_bool_kwarg @@ -80,10 +81,10 @@ # TODO: flexible with index=None and/or items=None -T = TypeVar("T", bound="BlockManager") +T = TypeVar("T", bound="BaseBlockManager") -class BlockManager(DataManager): +class BaseBlockManager(DataManager): """ Core internal data structure to implement DataFrame, Series, etc. @@ -145,59 +146,18 @@ class BlockManager(DataManager): _blknos: np.ndarray _blklocs: np.ndarray + blocks: tuple[Block, ...] + axes: list[Index] # Non-trivially faster than a property - ndim = 2 # overridden by SingleBlockManager + ndim: int - def __init__( - self, - blocks: Sequence[Block], - axes: Sequence[Index], - verify_integrity: bool = True, - ): - self.axes = [ensure_index(ax) for ax in axes] - self.blocks: tuple[Block, ...] 
= tuple(blocks) - - for block in blocks: - if self.ndim != block.ndim: - raise AssertionError( - f"Number of Block dimensions ({block.ndim}) must equal " - f"number of axes ({self.ndim})" - ) - - if verify_integrity: - self._verify_integrity() - - # Populate known_consolidate, blknos, and blklocs lazily - self._known_consolidated = False - # error: Incompatible types in assignment (expression has type "None", - # variable has type "ndarray") - self._blknos = None # type: ignore[assignment] - # error: Incompatible types in assignment (expression has type "None", - # variable has type "ndarray") - self._blklocs = None # type: ignore[assignment] + def __init__(self, blocks, axes, verify_integrity=True): + raise NotImplementedError @classmethod - def _simple_new(cls, blocks: tuple[Block, ...], axes: list[Index]): - """ - Fastpath constructor; does NO validation. - """ - obj = cls.__new__(cls) - obj.axes = axes - obj.blocks = blocks - - # Populate known_consolidate, blknos, and blklocs lazily - obj._known_consolidated = False - obj._blknos = None - obj._blklocs = None - return obj - - @classmethod - def from_blocks(cls, blocks: list[Block], axes: list[Index]): - """ - Constructor for BlockManager and SingleBlockManager with same signature. - """ - return cls(blocks, axes, verify_integrity=False) + def from_blocks(cls: type_t[T], blocks: list[Block], axes: list[Index]) -> T: + raise NotImplementedError @property def blknos(self): @@ -336,7 +296,7 @@ def __getstate__(self): return axes_array, block_values, block_items, extra_state def __setstate__(self, state): - def unpickle_block(values, mgr_locs, ndim: int): + def unpickle_block(values, mgr_locs, ndim: int) -> Block: # TODO(EA2D): ndim would be unnecessary with 2D EAs # older pickles may store e.g. 
DatetimeIndex instead of DatetimeArray values = extract_array(values, extract_numpy=True) @@ -466,12 +426,6 @@ def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T: return type(self).from_blocks(result_blocks, [self.axes[0], index]) - def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager: - """ - Apply array_op blockwise with another (aligned) BlockManager. - """ - return operate_blockwise(self, other, array_op) - def apply( self: T, f, @@ -539,12 +493,12 @@ def apply( return type(self).from_blocks(result_blocks, self.axes) def quantile( - self, + self: T, *, qs: Float64Index, axis: int = 0, interpolation="linear", - ) -> BlockManager: + ) -> T: """ Iterate over blocks applying quantile reduction. This routine is intended for reduction type operations and @@ -578,7 +532,7 @@ def quantile( return type(self)(blocks, new_axes) - def where(self, other, cond, align: bool, errors: str) -> BlockManager: + def where(self: T, other, cond, align: bool, errors: str) -> T: if align: align_keys = ["other", "cond"] else: @@ -593,7 +547,7 @@ def where(self, other, cond, align: bool, errors: str) -> BlockManager: errors=errors, ) - def setitem(self, indexer, value) -> BlockManager: + def setitem(self: T, indexer, value) -> T: return self.apply("setitem", indexer=indexer, value=value) def putmask(self, mask, new, align: bool = True): @@ -611,38 +565,38 @@ def putmask(self, mask, new, align: bool = True): new=new, ) - def diff(self, n: int, axis: int) -> BlockManager: + def diff(self: T, n: int, axis: int) -> T: axis = self._normalize_axis(axis) return self.apply("diff", n=n, axis=axis) - def interpolate(self, **kwargs) -> BlockManager: + def interpolate(self: T, **kwargs) -> T: return self.apply("interpolate", **kwargs) - def shift(self, periods: int, axis: int, fill_value) -> BlockManager: + def shift(self: T, periods: int, axis: int, fill_value) -> T: axis = self._normalize_axis(axis) if fill_value is lib.no_default: fill_value = 
None return self.apply("shift", periods=periods, axis=axis, fill_value=fill_value) - def fillna(self, value, limit, inplace: bool, downcast) -> BlockManager: + def fillna(self: T, value, limit, inplace: bool, downcast) -> T: return self.apply( "fillna", value=value, limit=limit, inplace=inplace, downcast=downcast ) - def downcast(self) -> BlockManager: + def downcast(self: T) -> T: return self.apply("downcast") - def astype(self, dtype, copy: bool = False, errors: str = "raise") -> BlockManager: + def astype(self: T, dtype, copy: bool = False, errors: str = "raise") -> T: return self.apply("astype", dtype=dtype, copy=copy, errors=errors) def convert( - self, + self: T, copy: bool = True, datetime: bool = True, numeric: bool = True, timedelta: bool = True, - ) -> BlockManager: + ) -> T: return self.apply( "convert", copy=copy, @@ -651,7 +605,7 @@ def convert( timedelta=timedelta, ) - def replace(self, to_replace, value, inplace: bool, regex: bool) -> BlockManager: + def replace(self: T, to_replace, value, inplace: bool, regex: bool) -> T: assert np.ndim(value) == 0, value return self.apply( "replace", to_replace=to_replace, value=value, inplace=inplace, regex=regex @@ -677,7 +631,7 @@ def replace_list( bm._consolidate_inplace() return bm - def to_native_types(self, **kwargs) -> BlockManager: + def to_native_types(self: T, **kwargs) -> T: """ Convert values to native types (strings / python objects) that are used in formatting (repr / csv). @@ -721,7 +675,7 @@ def is_view(self) -> bool: return False - def get_bool_data(self, copy: bool = False) -> BlockManager: + def get_bool_data(self: T, copy: bool = False) -> T: """ Select blocks that are bool-dtype and columns from object-dtype blocks that are all-bool. 
@@ -746,7 +700,7 @@ def get_bool_data(self, copy: bool = False) -> BlockManager: return self._combine(new_blocks, copy) - def get_numeric_data(self, copy: bool = False) -> BlockManager: + def get_numeric_data(self: T, copy: bool = False) -> T: """ Parameters ---------- @@ -779,21 +733,6 @@ def _combine( return type(self).from_blocks(new_blocks, axes) - def get_slice(self, slobj: slice, axis: int = 0) -> BlockManager: - assert isinstance(slobj, slice), type(slobj) - - if axis == 0: - new_blocks = self._slice_take_blocks_ax0(slobj) - elif axis == 1: - new_blocks = [blk.getitem_block_index(slobj) for blk in self.blocks] - else: - raise IndexError("Requested axis not found in manager") - - new_axes = list(self.axes) - new_axes[axis] = new_axes[axis]._getitem_slice(slobj) - - return type(self)._simple_new(tuple(new_blocks), new_axes) - @property def nblocks(self) -> int: return len(self.blocks) @@ -1007,7 +946,7 @@ def fast_xs(self, loc: int) -> ArrayLike: return result - def consolidate(self) -> BlockManager: + def consolidate(self: T) -> T: """ Join together blocks having same dtype @@ -1030,19 +969,6 @@ def _consolidate_inplace(self) -> None: self._known_consolidated = True self._rebuild_blknos_and_blklocs() - def iget(self, i: int) -> SingleBlockManager: - """ - Return the data as a SingleBlockManager. - """ - block = self.blocks[self.blknos[i]] - values = block.iget(self.blklocs[i]) - - # shortcut for select a single-dim from a 2-dim BM - bp = BlockPlacement(slice(0, len(values))) - values = maybe_coerce_values(values) - nb = type(block)(values, placement=bp, ndim=1) - return SingleBlockManager(nb, self.axes[1]) - def iget_values(self, i: int) -> ArrayLike: """ Return the data for column i as the values (ndarray or ExtensionArray). @@ -1051,19 +977,6 @@ def iget_values(self, i: int) -> ArrayLike: values = block.iget(self.blklocs[i]) return values - def idelete(self, indexer) -> BlockManager: - """ - Delete selected locations, returning a new BlockManager. 
- """ - is_deleted = np.zeros(self.shape[0], dtype=np.bool_) - is_deleted[indexer] = True - taker = (~is_deleted).nonzero()[0] - - nbs = self._slice_take_blocks_ax0(taker, only_slice=True) - new_columns = self.items[~is_deleted] - axes = [new_columns, self.axes[1]] - return type(self)._simple_new(tuple(nbs), axes) - def iset(self, loc: int | slice | np.ndarray, value: ArrayLike): """ Set new item in-place. Does not consolidate. Adds new Block if not @@ -1481,21 +1394,123 @@ def take(self: T, indexer, axis: int = 1, verify: bool = True) -> T: consolidate=False, ) - def _equal_values(self: T, other: T) -> bool: + +class BlockManager(BaseBlockManager): + """ + BaseBlockManager that holds 2D blocks. + """ + + ndim = 2 + + def __init__( + self, + blocks: Sequence[Block], + axes: Sequence[Index], + verify_integrity: bool = True, + ): + self.axes = [ensure_index(ax) for ax in axes] + self.blocks: tuple[Block, ...] = tuple(blocks) + + for block in blocks: + if self.ndim != block.ndim: + raise AssertionError( + f"Number of Block dimensions ({block.ndim}) must equal " + f"number of axes ({self.ndim})" + ) + + if verify_integrity: + self._verify_integrity() + + # Populate known_consolidate, blknos, and blklocs lazily + self._known_consolidated = False + # error: Incompatible types in assignment (expression has type "None", + # variable has type "ndarray") + self._blknos = None # type: ignore[assignment] + # error: Incompatible types in assignment (expression has type "None", + # variable has type "ndarray") + self._blklocs = None # type: ignore[assignment] + + @classmethod + def _simple_new(cls, blocks: tuple[Block, ...], axes: list[Index]): + """ + Fastpath constructor; does NO validation. 
+ """ + obj = cls.__new__(cls) + obj.axes = axes + obj.blocks = blocks + + # Populate known_consolidate, blknos, and blklocs lazily + obj._known_consolidated = False + obj._blknos = None + obj._blklocs = None + return obj + + @classmethod + def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> BlockManager: + """ + Constructor for BlockManager and SingleBlockManager with same signature. + """ + return cls(blocks, axes, verify_integrity=False) + + def get_slice(self, slobj: slice, axis: int = 0) -> BlockManager: + assert isinstance(slobj, slice), type(slobj) + + if axis == 0: + new_blocks = self._slice_take_blocks_ax0(slobj) + elif axis == 1: + new_blocks = [blk.getitem_block_index(slobj) for blk in self.blocks] + else: + raise IndexError("Requested axis not found in manager") + + new_axes = list(self.axes) + new_axes[axis] = new_axes[axis]._getitem_slice(slobj) + + return type(self)._simple_new(tuple(new_blocks), new_axes) + + def iget(self, i: int) -> SingleBlockManager: + """ + Return the data as a SingleBlockManager. + """ + block = self.blocks[self.blknos[i]] + values = block.iget(self.blklocs[i]) + + # shortcut for select a single-dim from a 2-dim BM + bp = BlockPlacement(slice(0, len(values))) + values = maybe_coerce_values(values) + nb = type(block)(values, placement=bp, ndim=1) + return SingleBlockManager(nb, self.axes[1]) + + def idelete(self, indexer) -> BlockManager: + """ + Delete selected locations, returning a new BlockManager. 
+ """ + is_deleted = np.zeros(self.shape[0], dtype=np.bool_) + is_deleted[indexer] = True + taker = (~is_deleted).nonzero()[0] + + nbs = self._slice_take_blocks_ax0(taker, only_slice=True) + new_columns = self.items[~is_deleted] + axes = [new_columns, self.axes[1]] + return type(self)._simple_new(tuple(nbs), axes) + + # ---------------------------------------------------------------- + # Block-wise Operation + + def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager: + """ + Apply array_op blockwise with another (aligned) BlockManager. + """ + return operate_blockwise(self, other, array_op) + + def _equal_values(self: BlockManager, other: BlockManager) -> bool: """ Used in .equals defined in base class. Only check the column values assuming shape and indexes have already been checked. """ - if self.ndim == 1: - # For SingleBlockManager (i.e.Series) - if other.ndim != 1: - return False - left = self.blocks[0].values - right = other.blocks[0].values - return array_equals(left, right) - return blockwise_all(self, other, array_equals) + # ---------------------------------------------------------------- + def unstack(self, unstacker, fill_value) -> BlockManager: """ Return a BlockManager with all blocks unstacked.. @@ -1534,7 +1549,7 @@ def unstack(self, unstacker, fill_value) -> BlockManager: return bm -class SingleBlockManager(BlockManager, SingleDataManager): +class SingleBlockManager(BaseBlockManager, SingleDataManager): """ manage a single block with """ ndim = 1 @@ -1687,6 +1702,18 @@ def set_values(self, values: ArrayLike): self.blocks[0].values = values self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values))) + def _equal_values(self: T, other: T) -> bool: + """ + Used in .equals defined in base class. Only check the column values + assuming shape and indexes have already been checked. 
+ """ + # For SingleBlockManager (i.e.Series) + if other.ndim != 1: + return False + left = self.blocks[0].values + right = other.blocks[0].values + return array_equals(left, right) + # -------------------------------------------------------------------- # Constructor Helpers diff --git a/pandas/core/series.py b/pandas/core/series.py index 8e9df114feda0..968ab27d6d58c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1107,12 +1107,8 @@ def _set_labels(self, key, value): def _set_values(self, key, value): if isinstance(key, Series): key = key._values - # error: Incompatible types in assignment (expression has type "Union[Any, - # BlockManager]", variable has type "Union[SingleArrayManager, - # SingleBlockManager]") - self._mgr = self._mgr.setitem( # type: ignore[assignment] - indexer=key, value=value - ) + + self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False):
Instead, most of BlockManager becomes _BlockManager which is subclassed by SingleBlockManager and BlockManager, the latter holding only the 2d-specific BM methods. Motivation: putting BlockManager constructor in cython gives a much bigger gain if we can define `__cinit__`, and we can't do that with the two subclasses having mismatched `__init__` signatures.
https://api.github.com/repos/pandas-dev/pandas/pulls/40625
2021-03-25T03:54:12Z
2021-04-08T20:16:27Z
2021-04-08T20:16:27Z
2021-04-08T20:57:12Z
TYP: intp in libalgos
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 122a014604bf0..d3edcc4e57b2d 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -591,16 +591,17 @@ def validate_limit(nobs: int, limit=None) -> int: @cython.boundscheck(False) @cython.wraparound(False) -def pad(ndarray[algos_t] old, ndarray[algos_t] new, limit=None): +def pad(ndarray[algos_t] old, ndarray[algos_t] new, limit=None) -> ndarray: + # -> ndarray[intp_t, ndim=1] cdef: Py_ssize_t i, j, nleft, nright - ndarray[int64_t, ndim=1] indexer + ndarray[intp_t, ndim=1] indexer algos_t cur, next_val int lim, fill_count = 0 nleft = len(old) nright = len(new) - indexer = np.empty(nright, dtype=np.int64) + indexer = np.empty(nright, dtype=np.intp) indexer[:] = -1 lim = validate_limit(nright, limit) @@ -737,15 +738,16 @@ D @cython.boundscheck(False) @cython.wraparound(False) def backfill(ndarray[algos_t] old, ndarray[algos_t] new, limit=None) -> ndarray: + # -> ndarray[intp_t, ndim=1] cdef: Py_ssize_t i, j, nleft, nright - ndarray[int64_t, ndim=1] indexer + ndarray[intp_t, ndim=1] indexer algos_t cur, prev int lim, fill_count = 0 nleft = len(old) nright = len(new) - indexer = np.empty(nright, dtype=np.int64) + indexer = np.empty(nright, dtype=np.intp) indexer[:] = -1 lim = validate_limit(nright, limit) diff --git a/pandas/_libs/algos_take_helper.pxi.in b/pandas/_libs/algos_take_helper.pxi.in index 929cb86c41036..11679fc432edc 100644 --- a/pandas/_libs/algos_take_helper.pxi.in +++ b/pandas/_libs/algos_take_helper.pxi.in @@ -219,8 +219,8 @@ def take_2d_multi_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values, fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - ndarray[int64_t] idx0 = indexer[0] - ndarray[int64_t] idx1 = indexer[1] + ndarray[intp_t] idx0 = indexer[0] + ndarray[intp_t] idx1 = indexer[1] {{c_type_out}} fv n = len(idx0) diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 9159fa03c12c0..71f4b0c0ae18f 100644 --- a/pandas/_libs/index.pyx +++ 
b/pandas/_libs/index.pyx @@ -458,19 +458,19 @@ cdef class DatetimeEngine(Int64Engine): def get_indexer(self, ndarray values): self._ensure_mapping_populated() if values.dtype != self._get_box_dtype(): - return np.repeat(-1, len(values)).astype('i4') + return np.repeat(-1, len(values)).astype(np.intp) values = np.asarray(values).view('i8') return self.mapping.lookup(values) def get_pad_indexer(self, other: np.ndarray, limit=None) -> np.ndarray: if other.dtype != self._get_box_dtype(): - return np.repeat(-1, len(other)).astype('i4') + return np.repeat(-1, len(other)).astype(np.intp) other = np.asarray(other).view('i8') return algos.pad(self._get_index_values(), other, limit=limit) def get_backfill_indexer(self, other: np.ndarray, limit=None) -> np.ndarray: if other.dtype != self._get_box_dtype(): - return np.repeat(-1, len(other)).astype('i4') + return np.repeat(-1, len(other)).astype(np.intp) other = np.asarray(other).view('i8') return algos.backfill(self._get_index_values(), other, limit=limit) @@ -653,7 +653,7 @@ cdef class BaseMultiIndexCodesEngine: ndarray[int64_t, ndim=1] target_order ndarray[object, ndim=1] target_values ndarray[int64_t, ndim=1] new_codes, new_target_codes - ndarray[int64_t, ndim=1] sorted_indexer + ndarray[intp_t, ndim=1] sorted_indexer target_order = np.argsort(target).astype('int64') target_values = target[target_order] @@ -694,9 +694,8 @@ cdef class BaseMultiIndexCodesEngine: next_code += 1 # get the indexer, and undo the sorting of `target.values` - sorted_indexer = ( - algos.backfill if method == "backfill" else algos.pad - )(new_codes, new_target_codes, limit=limit).astype('int64') + algo = algos.backfill if method == "backfill" else algos.pad + sorted_indexer = algo(new_codes, new_target_codes, limit=limit) return sorted_indexer[np.argsort(target_order)] def get_loc(self, object key): diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index 2a6080e38a732..6dfdc99f4fd9c 100644 --- 
a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -15,10 +15,7 @@ from pandas._typing import ArrayLike from pandas.core.dtypes.cast import maybe_promote -from pandas.core.dtypes.common import ( - ensure_int64, - ensure_platform_int, -) +from pandas.core.dtypes.common import ensure_platform_int from pandas.core.dtypes.missing import na_value_for_dtype from pandas.core.construction import ensure_wrapped_if_datetimelike @@ -201,7 +198,7 @@ def take_1d( def take_2d_multi( - arr: np.ndarray, indexer: np.ndarray, fill_value=np.nan + arr: np.ndarray, indexer: tuple[np.ndarray, np.ndarray], fill_value=np.nan ) -> np.ndarray: """ Specialized Cython take which sets NaN values in one pass. @@ -214,11 +211,9 @@ def take_2d_multi( row_idx, col_idx = indexer - row_idx = ensure_int64(row_idx) - col_idx = ensure_int64(col_idx) - # error: Incompatible types in assignment (expression has type "Tuple[Any, Any]", - # variable has type "ndarray") - indexer = row_idx, col_idx # type: ignore[assignment] + row_idx = ensure_platform_int(row_idx) + col_idx = ensure_platform_int(col_idx) + indexer = row_idx, col_idx mask_info = None # check for promotion based on types only (do this first because @@ -474,7 +469,7 @@ def _take_nd_object( if arr.dtype != out.dtype: arr = arr.astype(out.dtype) if arr.shape[axis] > 0: - arr.take(ensure_platform_int(indexer), axis=axis, out=out) + arr.take(indexer, axis=axis, out=out) if needs_masking: outindexer = [slice(None)] * arr.ndim outindexer[axis] = mask @@ -482,11 +477,15 @@ def _take_nd_object( def _take_2d_multi_object( - arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value, mask_info + arr: np.ndarray, + indexer: tuple[np.ndarray, np.ndarray], + out: np.ndarray, + fill_value, + mask_info, ) -> None: # this is not ideal, performance-wise, but it's better than raising # an exception (best to optimize in Cython to avoid getting here) - row_idx, col_idx = indexer + row_idx, col_idx = indexer # both np.intp if 
mask_info is not None: (row_mask, col_mask), (row_needs, col_needs) = mask_info else: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8d0c8e5f29413..94b4622fadb3d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4558,9 +4558,7 @@ def _reindex_multi(self, axes, copy: bool, fill_value) -> DataFrame: indexer = row_indexer, col_indexer # error: Argument 2 to "take_2d_multi" has incompatible type "Tuple[Any, # Any]"; expected "ndarray" - new_values = take_2d_multi( - self.values, indexer, fill_value=fill_value # type: ignore[arg-type] - ) + new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers( diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index cd800b3f3a452..33dfde7dfef61 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1784,19 +1784,19 @@ def test_pad_backfill_object_segfault(): new = np.array([datetime(2010, 12, 31)], dtype="O") result = libalgos.pad["object"](old, new) - expected = np.array([-1], dtype=np.int64) + expected = np.array([-1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) result = libalgos.pad["object"](new, old) - expected = np.array([], dtype=np.int64) + expected = np.array([], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) result = libalgos.backfill["object"](old, new) - expected = np.array([-1], dtype=np.int64) + expected = np.array([-1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) result = libalgos.backfill["object"](new, old) - expected = np.array([], dtype=np.int64) + expected = np.array([], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) @@ -1822,7 +1822,7 @@ def test_backfill(self): filler = libalgos.backfill["int64_t"](old.values, new.values) - expect_filler = np.array([0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1], dtype=np.int64) + expect_filler = np.array([0, 0, 1, 1, 1, 1, 2, 
2, 2, 2, 2, -1], dtype=np.intp) tm.assert_numpy_array_equal(filler, expect_filler) # corner case @@ -1830,7 +1830,7 @@ def test_backfill(self): new = Index(list(range(5, 10))) filler = libalgos.backfill["int64_t"](old.values, new.values) - expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64) + expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(filler, expect_filler) def test_pad(self): @@ -1839,14 +1839,14 @@ def test_pad(self): filler = libalgos.pad["int64_t"](old.values, new.values) - expect_filler = np.array([-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2], dtype=np.int64) + expect_filler = np.array([-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2], dtype=np.intp) tm.assert_numpy_array_equal(filler, expect_filler) # corner case old = Index([5, 10]) new = Index(np.arange(5)) filler = libalgos.pad["int64_t"](old.values, new.values) - expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64) + expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(filler, expect_filler)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40623
2021-03-25T01:03:13Z
2021-03-26T13:05:46Z
2021-03-26T13:05:46Z
2021-03-26T15:24:11Z
PERF: cache_readonly for Block properties
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 29175d0b20f92..09e214237b736 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -36,6 +36,7 @@ Shape, final, ) +from pandas.util._decorators import cache_readonly from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -165,7 +166,7 @@ class Block(libinternals.Block, PandasObject): _validate_ndim = True @final - @property + @cache_readonly def _consolidate_key(self): return self._can_consolidate, self.dtype.name @@ -188,7 +189,7 @@ def _can_hold_na(self) -> bool: return values._can_hold_na @final - @property + @cache_readonly def is_categorical(self) -> bool: warnings.warn( "Block.is_categorical is deprecated and will be removed in a " @@ -217,6 +218,7 @@ def internal_values(self): """ return self.values + @property def array_values(self) -> ExtensionArray: """ The array that Series.array returns. Always an ExtensionArray. @@ -245,7 +247,7 @@ def get_block_values_for_json(self) -> np.ndarray: return np.asarray(self.values).reshape(self.shape) @final - @property + @cache_readonly def fill_value(self): # Used in reindex_indexer return na_value_for_dtype(self.dtype, compat=False) @@ -353,7 +355,7 @@ def shape(self) -> Shape: return self.values.shape @final - @property + @cache_readonly def dtype(self) -> DtypeObj: return self.values.dtype @@ -378,6 +380,11 @@ def delete(self, loc) -> None: """ self.values = np.delete(self.values, loc, 0) self.mgr_locs = self._mgr_locs.delete(loc) + try: + self._cache.clear() + except AttributeError: + # _cache not yet initialized + pass @final def apply(self, func, **kwargs) -> List[Block]: @@ -592,7 +599,7 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"): """ values = self.values if values.dtype.kind in ["m", "M"]: - values = self.array_values() + values = self.array_values new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) @@ -931,7 
+938,7 @@ def setitem(self, indexer, value): return self.coerce_to_target_dtype(value).setitem(indexer, value) if self.dtype.kind in ["m", "M"]: - arr = self.array_values().T + arr = self.array_values.T arr[indexer] = value return self @@ -1445,7 +1452,7 @@ class ExtensionBlock(Block): values: ExtensionArray - @property + @cache_readonly def shape(self) -> Shape: # TODO(EA2D): override unnecessary with 2D EAs if self.ndim == 1: @@ -1476,6 +1483,12 @@ def set_inplace(self, locs, values): # see GH#33457 assert locs.tolist() == [0] self.values = values + try: + # TODO(GH33457) this can be removed + self._cache.clear() + except AttributeError: + # _cache not yet initialized + pass def putmask(self, mask, new) -> List[Block]: """ @@ -1500,7 +1513,7 @@ def is_view(self) -> bool: """Extension arrays are never treated as views.""" return False - @property + @cache_readonly def is_numeric(self): return self.values.dtype._is_numeric @@ -1549,6 +1562,7 @@ def get_values(self, dtype: Optional[DtypeObj] = None) -> np.ndarray: # TODO(EA2D): reshape not needed with 2D EAs return np.asarray(self.values).reshape(self.shape) + @cache_readonly def array_values(self) -> ExtensionArray: return self.values @@ -1675,10 +1689,7 @@ def where(self, other, cond, errors="raise") -> List[Block]: # The default `other` for Series / Frame is np.nan # we want to replace that with the correct NA value # for the type - - # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has no - # attribute "na_value" - other = self.dtype.na_value # type: ignore[union-attr] + other = self.dtype.na_value if is_sparse(self.values): # TODO(SparseArray.__setitem__): remove this if condition @@ -1739,10 +1750,11 @@ class HybridMixin: array_values: Callable def _can_hold_element(self, element: Any) -> bool: - values = self.array_values() + values = self.array_values try: - values._validate_setitem_value(element) + # error: "Callable[..., Any]" has no attribute "_validate_setitem_value" + 
values._validate_setitem_value(element) # type: ignore[attr-defined] return True except (ValueError, TypeError): return False @@ -1768,9 +1780,7 @@ def _can_hold_element(self, element: Any) -> bool: if isinstance(element, (IntegerArray, FloatingArray)): if element._mask.any(): return False - # error: Argument 1 to "can_hold_element" has incompatible type - # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" - return can_hold_element(self.dtype, element) # type: ignore[arg-type] + return can_hold_element(self.dtype, element) class NDArrayBackedExtensionBlock(HybridMixin, Block): @@ -1780,23 +1790,25 @@ class NDArrayBackedExtensionBlock(HybridMixin, Block): def internal_values(self): # Override to return DatetimeArray and TimedeltaArray - return self.array_values() + return self.array_values def get_values(self, dtype: Optional[DtypeObj] = None) -> np.ndarray: """ return object dtype as boxed values, such as Timestamps/Timedelta """ - values = self.array_values() + values = self.array_values if is_object_dtype(dtype): # DTA/TDA constructor and astype can handle 2D - values = values.astype(object) + # error: "Callable[..., Any]" has no attribute "astype" + values = values.astype(object) # type: ignore[attr-defined] # TODO(EA2D): reshape not needed with 2D EAs return np.asarray(values).reshape(self.shape) def iget(self, key): # GH#31649 we need to wrap scalars in Timestamp/Timedelta # TODO(EA2D): this can be removed if we ever have 2D EA - return self.array_values().reshape(self.shape)[key] + # error: "Callable[..., Any]" has no attribute "reshape" + return self.array_values.reshape(self.shape)[key] # type: ignore[attr-defined] def putmask(self, mask, new) -> List[Block]: mask = extract_bool_array(mask) @@ -1805,14 +1817,16 @@ def putmask(self, mask, new) -> List[Block]: return self.astype(object).putmask(mask, new) # TODO(EA2D): reshape unnecessary with 2D EAs - arr = self.array_values().reshape(self.shape) + # error: "Callable[..., Any]" has no attribute 
"reshape" + arr = self.array_values.reshape(self.shape) # type: ignore[attr-defined] arr = cast("NDArrayBackedExtensionArray", arr) arr.T.putmask(mask, new) return [self] def where(self, other, cond, errors="raise") -> List[Block]: # TODO(EA2D): reshape unnecessary with 2D EAs - arr = self.array_values().reshape(self.shape) + # error: "Callable[..., Any]" has no attribute "reshape" + arr = self.array_values.reshape(self.shape) # type: ignore[attr-defined] cond = extract_bool_array(cond) @@ -1848,15 +1862,17 @@ def diff(self, n: int, axis: int = 0) -> List[Block]: by apply. """ # TODO(EA2D): reshape not necessary with 2D EAs - values = self.array_values().reshape(self.shape) + # error: "Callable[..., Any]" has no attribute "reshape" + values = self.array_values.reshape(self.shape) # type: ignore[attr-defined] new_values = values - values.shift(n, axis=axis) new_values = maybe_coerce_values(new_values) return [self.make_block(new_values)] def shift(self, periods: int, axis: int = 0, fill_value: Any = None) -> List[Block]: - # TODO(EA2D) this is unnecessary if these blocks are backed by 2D EAs - values = self.array_values().reshape(self.shape) + # TODO(EA2D) this is unnecessary if these blocks are backed by 2D EA + # error: "Callable[..., Any]" has no attribute "reshape" + values = self.array_values.reshape(self.shape) # type: ignore[attr-defined] new_values = values.shift(periods, fill_value=fill_value, axis=axis) new_values = maybe_coerce_values(new_values) return [self.make_block_same_class(new_values)] @@ -1871,9 +1887,13 @@ def fillna( # TODO: don't special-case td64 return self.astype(object).fillna(value, limit, inplace, downcast) - values = self.array_values() - values = values if inplace else values.copy() - new_values = values.fillna(value=value, limit=limit) + values = self.array_values + # error: "Callable[..., Any]" has no attribute "copy" + values = values if inplace else values.copy() # type: ignore[attr-defined] + # error: "Callable[..., Any]" has no 
attribute "fillna" + new_values = values.fillna( # type: ignore[attr-defined] + value=value, limit=limit + ) new_values = maybe_coerce_values(new_values) return [self.make_block_same_class(values=new_values)] @@ -1883,6 +1903,7 @@ class DatetimeLikeBlockMixin(NDArrayBackedExtensionBlock): is_numeric = False + @cache_readonly def array_values(self): return ensure_wrapped_if_datetimelike(self.values) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 14fa994631623..28151a43d1dac 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1668,7 +1668,7 @@ def internal_values(self): def array_values(self): """The array that Series.array returns""" - return self._block.array_values() + return self._block.array_values @property def _can_hold_na(self) -> bool:
We'll be able to cache more aggressively following each of #35417, #40574, and #40263
https://api.github.com/repos/pandas-dev/pandas/pulls/40620
2021-03-24T21:07:12Z
2021-03-29T14:42:30Z
2021-03-29T14:42:30Z
2021-03-29T14:46:12Z
REF: share _maybe_downcast between Block subclasses
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 7d8dcb34ed582..44af4aa767a08 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -510,8 +510,19 @@ def split_and_operate(self, func, *args, **kwargs) -> List[Block]: res_blocks.extend(rbs) return res_blocks + @final def _maybe_downcast(self, blocks: List[Block], downcast=None) -> List[Block]: + if self.dtype == _dtype_obj: + # TODO: why is behavior different for object dtype? + if downcast is not None: + return blocks + + # split and convert the blocks + return extend_blocks( + [blk.convert(datetime=True, numeric=False) for blk in blocks] + ) + # no need to downcast our float # unless indicated if downcast is None and self.dtype.kind in ["f", "m", "M"]: @@ -520,6 +531,7 @@ def _maybe_downcast(self, blocks: List[Block], downcast=None) -> List[Block]: return extend_blocks([b.downcast(downcast) for b in blocks]) + @final def downcast(self, dtypes=None) -> List[Block]: """ try to downcast each item to the dict of dtypes if present """ # turn it off completely @@ -1375,6 +1387,7 @@ def _unstack(self, unstacker, fill_value, new_placement): blocks = [new_block(new_values, placement=new_placement, ndim=2)] return blocks, mask + @final def quantile( self, qs: Float64Index, interpolation="linear", axis: int = 0 ) -> Block: @@ -1952,14 +1965,6 @@ def convert( res_values = ensure_block_shape(res_values, self.ndim) return [self.make_block(res_values)] - def _maybe_downcast(self, blocks: List[Block], downcast=None) -> List[Block]: - - if downcast is not None: - return blocks - - # split and convert the blocks - return extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks]) - def _can_hold_element(self, element: Any) -> bool: return True
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40619
2021-03-24T21:00:45Z
2021-03-24T23:11:20Z
2021-03-24T23:11:20Z
2021-03-24T23:14:52Z
REF: move repeated logic from Manager.insert to DataFrame.insert
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8d0c8e5f29413..32439af6db238 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4298,8 +4298,14 @@ def insert(self, loc, column, value, allow_duplicates: bool = False) -> None: "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) + if not allow_duplicates and column in self.columns: + # Should this be a different kind of error?? + raise ValueError(f"cannot insert {column}, already exists") + if not isinstance(loc, int): + raise TypeError("loc must be int") + value = self._sanitize_column(value) - self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates) + self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 99a1706c671b1..950d229c45f9e 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -904,7 +904,7 @@ def iset(self, loc: Union[int, slice, np.ndarray], value): self.arrays[mgr_idx] = value_arr return - def insert(self, loc: int, item: Hashable, value, allow_duplicates: bool = False): + def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: """ Insert item at selected position. @@ -912,25 +912,18 @@ def insert(self, loc: int, item: Hashable, value, allow_duplicates: bool = False ---------- loc : int item : hashable - value : array_like - allow_duplicates: bool - If False, trying to insert non-unique item will raise - + value : np.ndarray or ExtensionArray """ - if not allow_duplicates and item in self.items: - # Should this be a different kind of error?? 
- raise ValueError(f"cannot insert {item}, already exists") - - if not isinstance(loc, int): - raise TypeError("loc must be int") - # insert to the axis; this could possibly raise a TypeError new_axis = self.items.insert(loc, item) value = extract_array(value, extract_numpy=True) if value.ndim == 2: if value.shape[0] == 1: - value = value[0, :] + # error: Invalid index type "Tuple[int, slice]" for + # "Union[Any, ExtensionArray, ndarray]"; expected type + # "Union[int, slice, ndarray]" + value = value[0, :] # type: ignore[index] else: raise ValueError( f"Expected a 1D array, got an array with shape {value.shape}" diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index e4cce731b7b56..14fa994631623 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1208,9 +1208,7 @@ def value_getitem(placement): # Newly created block's dtype may already be present. self._known_consolidated = False - def insert( - self, loc: int, item: Hashable, value: ArrayLike, allow_duplicates: bool = False - ): + def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: """ Insert item at selected position. @@ -1219,17 +1217,7 @@ def insert( loc : int item : hashable value : np.ndarray or ExtensionArray - allow_duplicates: bool - If False, trying to insert non-unique item will raise - """ - if not allow_duplicates and item in self.items: - # Should this be a different kind of error?? - raise ValueError(f"cannot insert {item}, already exists") - - if not isinstance(loc, int): - raise TypeError("loc must be int") - # insert to the axis; this could possibly raise a TypeError new_axis = self.items.insert(loc, item)
https://api.github.com/repos/pandas-dev/pandas/pulls/40618
2021-03-24T20:53:54Z
2021-03-25T01:53:18Z
2021-03-25T01:53:17Z
2021-03-25T02:28:38Z
REF: share IntervalIndex._simple_new with ExtensionIndex
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index 02fb6c6beb391..76c16dee1cda1 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -231,6 +231,38 @@ class ExtensionIndex(Index): _data: Union[IntervalArray, NDArrayBackedExtensionArray] + _data_cls: Union[ + Type[Categorical], + Type[DatetimeArray], + Type[TimedeltaArray], + Type[PeriodArray], + Type[IntervalArray], + ] + + @classmethod + def _simple_new( + cls, + array: Union[IntervalArray, NDArrayBackedExtensionArray], + name: Hashable = None, + ): + """ + Construct from an ExtensionArray of the appropriate type. + + Parameters + ---------- + array : ExtensionArray + name : Label, default None + Attached as result.name + """ + assert isinstance(array, cls._data_cls), type(array) + + result = object.__new__(cls) + result._data = array + result._name = name + result._cache = {} + result._reset_identity() + return result + __eq__ = _make_wrapped_comparison_op("__eq__") __ne__ = _make_wrapped_comparison_op("__ne__") __lt__ = _make_wrapped_comparison_op("__lt__") @@ -362,30 +394,17 @@ class NDArrayBackedExtensionIndex(ExtensionIndex): _data: NDArrayBackedExtensionArray - _data_cls: Union[ - Type[Categorical], - Type[DatetimeArray], - Type[TimedeltaArray], - Type[PeriodArray], - ] - @classmethod def _simple_new( cls, values: NDArrayBackedExtensionArray, name: Hashable = None, ): - assert isinstance(values, cls._data_cls), type(values) - - result = object.__new__(cls) - result._data = values - result._name = name - result._cache = {} + result = super()._simple_new(values, name) # For groupby perf. 
See note in indexes/base about _index_data result._index_data = values._ndarray - result._reset_identity() return result def _get_engine_target(self) -> np.ndarray: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 1b286f258d72c..9bfc21a940917 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -187,6 +187,31 @@ def wrapped(self, other, sort=False): return wrapped +def _setop(op_name: str): + """ + Implement set operation. + """ + + def func(self, other, sort=None): + # At this point we are assured + # isinstance(other, IntervalIndex) + # other.closed == self.closed + + result = getattr(self._multiindex, op_name)(other._multiindex, sort=sort) + result_name = get_op_result_name(self, other) + + # GH 19101: ensure empty results have correct dtype + if result.empty: + result = result._values.astype(self.dtype.subtype) + else: + result = result._values + + return type(self).from_tuples(result, closed=self.closed, name=result_name) + + func.__name__ = op_name + return setop_check(func) + + @Appender( _interval_shared_docs["class"] % { @@ -218,19 +243,38 @@ def wrapped(self, other, sort=False): } ) @inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True) -@inherit_names(["__array__", "overlaps", "contains"], IntervalArray) +@inherit_names( + [ + "__array__", + "overlaps", + "contains", + "closed_left", + "closed_right", + "open_left", + "open_right", + "is_empty", + ], + IntervalArray, +) @inherit_names(["is_non_overlapping_monotonic", "closed"], IntervalArray, cache=True) -class IntervalIndex(IntervalMixin, ExtensionIndex): +class IntervalIndex(ExtensionIndex): _typ = "intervalindex" _comparables = ["name"] _attributes = ["name", "closed"] + # annotate properties pinned via inherit_names + closed: str + is_non_overlapping_monotonic: bool + closed_left: bool + closed_right: bool + # we would like our indexing holder to defer to us _defer_to_indexing = True _data: IntervalArray _values: 
IntervalArray _can_hold_strings = False + _data_cls = IntervalArray # -------------------------------------------------------------------- # Constructors @@ -241,7 +285,7 @@ def __new__( closed=None, dtype: Optional[Dtype] = None, copy: bool = False, - name=None, + name: Hashable = None, verify_integrity: bool = True, ): @@ -258,26 +302,6 @@ def __new__( return cls._simple_new(array, name) - @classmethod - def _simple_new(cls, array: IntervalArray, name: Hashable = None): - """ - Construct from an IntervalArray - - Parameters - ---------- - array : IntervalArray - name : Label, default None - Attached as result.name - """ - assert isinstance(array, IntervalArray), type(array) - - result = IntervalMixin.__new__(cls) - result._data = array - result.name = name - result._cache = {} - result._reset_identity() - return result - @classmethod @Appender( _interval_shared_docs["from_breaks"] @@ -605,7 +629,7 @@ def _searchsorted_monotonic(self, label, side: str = "left"): "non-overlapping and all monotonic increasing or decreasing" ) - if isinstance(label, IntervalMixin): + if isinstance(label, (IntervalMixin, IntervalIndex)): raise NotImplementedError("Interval objects are not currently supported") # GH 20921: "not is_monotonic_increasing" for the second condition @@ -1012,26 +1036,6 @@ def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex: return self[mask] - def _setop(op_name: str, sort=None): - def func(self, other, sort=sort): - # At this point we are assured - # isinstance(other, IntervalIndex) - # other.closed == self.closed - - result = getattr(self._multiindex, op_name)(other._multiindex, sort=sort) - result_name = get_op_result_name(self, other) - - # GH 19101: ensure empty results have correct dtype - if result.empty: - result = result._values.astype(self.dtype.subtype) - else: - result = result._values - - return type(self).from_tuples(result, closed=self.closed, name=result_name) - - func.__name__ = op_name - return setop_check(func) - 
_union = _setop("union") _difference = _setop("difference")
By not mixing IntervalMixin into IntervalIndex, mypy also starts taking it more seriously.
https://api.github.com/repos/pandas-dev/pandas/pulls/40617
2021-03-24T20:45:31Z
2021-03-24T23:10:52Z
2021-03-24T23:10:52Z
2021-03-24T23:12:44Z
DOC: Fix read_gbq docstring typo
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 215d966609ab4..562a62da369ae 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -159,7 +159,7 @@ def read_gbq( Use the :func:`tqdm.tqdm_gui` function to display a progress bar as a graphical dialog box. - Note that his feature requires version 0.12.0 or later of the + Note that this feature requires version 0.12.0 or later of the ``pandas-gbq`` package. And it requires the ``tqdm`` package. Slightly different than ``pandas-gbq``, here the default is ``None``.
https://api.github.com/repos/pandas-dev/pandas/pulls/40615
2021-03-24T18:05:16Z
2021-03-24T19:06:05Z
2021-03-24T19:06:05Z
2021-03-24T19:19:01Z
REF: remove DatetimeBlock, TimeDeltaBlock
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index f0018928255e6..af1350f088b7a 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -9,12 +9,10 @@ ) from pandas.core.internals.blocks import ( # io.pytables, io.packers Block, - DatetimeBlock, DatetimeTZBlock, ExtensionBlock, NumericBlock, ObjectBlock, - TimeDeltaBlock, ) from pandas.core.internals.concat import concatenate_managers from pandas.core.internals.managers import ( @@ -28,11 +26,9 @@ "Block", "CategoricalBlock", "NumericBlock", - "DatetimeBlock", "DatetimeTZBlock", "ExtensionBlock", "ObjectBlock", - "TimeDeltaBlock", "make_block", "DataManager", "ArrayManager", diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 367fe04678cd8..294d1fd078b08 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -471,7 +471,7 @@ def apply_with_block(self: T, f, align_keys=None, swap_axis=True, **kwargs) -> T # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no # attribute "tz" if hasattr(arr, "tz") and arr.tz is None: # type: ignore[union-attr] - # DatetimeArray needs to be converted to ndarray for DatetimeBlock + # DatetimeArray needs to be converted to ndarray for DatetimeLikeBlock # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no # attribute "_data" diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 2df1b62fb8221..d1d0db913f854 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -50,7 +50,6 @@ is_dtype_equal, is_extension_array_dtype, is_list_like, - is_object_dtype, is_sparse, pandas_dtype, ) @@ -207,13 +206,6 @@ def is_bool(self) -> bool: def external_values(self): return external_values(self.values) - @final - def internal_values(self): - """ - The array that Series._values returns (internal values). 
- """ - return self.values - @property def array_values(self) -> ExtensionArray: """ @@ -1771,7 +1763,8 @@ def get_values(self, dtype: Optional[DtypeObj] = None) -> np.ndarray: return object dtype as boxed values, such as Timestamps/Timedelta """ values = self.values - if is_object_dtype(dtype): + if dtype == _dtype_obj: + # DTA/TDA constructor and astype can handle 2D values = values.astype(object) # TODO(EA2D): reshape not needed with 2D EAs return np.asarray(values).reshape(self.shape) @@ -1821,7 +1814,7 @@ def diff(self, n: int, axis: int = 0) -> List[Block]: Returns ------- - A list with a new TimeDeltaBlock. + A list with a new Block. Notes ----- @@ -1869,19 +1862,16 @@ def delete(self, loc) -> None: pass -class DatetimeLikeBlockMixin(NDArrayBackedExtensionBlock): - """Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock.""" - - values: Union[DatetimeArray, TimedeltaArray] +class DatetimeLikeBlock(NDArrayBackedExtensionBlock): + """Mixin class for DatetimeLikeBlock, DatetimeTZBlock.""" + __slots__ = () is_numeric = False - -class DatetimeBlock(DatetimeLikeBlockMixin): - __slots__ = () + values: Union[DatetimeArray, TimedeltaArray] -class DatetimeTZBlock(ExtensionBlock, DatetimeLikeBlockMixin): +class DatetimeTZBlock(ExtensionBlock, DatetimeLikeBlock): """ implement a datetime64 block with a tz attribute """ values: DatetimeArray @@ -1890,10 +1880,12 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeLikeBlockMixin): is_extension = True is_numeric = False - diff = DatetimeBlock.diff - where = DatetimeBlock.where - putmask = DatetimeLikeBlockMixin.putmask - fillna = DatetimeLikeBlockMixin.fillna + diff = NDArrayBackedExtensionBlock.diff + where = NDArrayBackedExtensionBlock.where + putmask = NDArrayBackedExtensionBlock.putmask + fillna = NDArrayBackedExtensionBlock.fillna + + get_values = NDArrayBackedExtensionBlock.get_values # error: Incompatible types in assignment (expression has type # "Callable[[NDArrayBackedExtensionBlock], bool]", base class 
"ExtensionBlock" @@ -1901,10 +1893,6 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeLikeBlockMixin): is_view = NDArrayBackedExtensionBlock.is_view # type: ignore[assignment] -class TimeDeltaBlock(DatetimeLikeBlockMixin): - __slots__ = () - - class ObjectBlock(Block): __slots__ = () is_object = True @@ -2022,10 +2010,8 @@ def get_block_type(values, dtype: Optional[Dtype] = None): # Note: need to be sure PandasArray is unwrapped before we get here cls = ExtensionBlock - elif kind == "M": - cls = DatetimeBlock - elif kind == "m": - cls = TimeDeltaBlock + elif kind in ["M", "m"]: + cls = DatetimeLikeBlock elif kind in ["f", "c", "i", "u", "b"]: cls = NumericBlock else: diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index a5c1f3985e70e..19c9b27db9f70 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -507,12 +507,16 @@ def _is_uniform_join_units(join_units: List[JoinUnit]) -> bool: _concatenate_join_units (which uses `concat_compat`). """ - # TODO: require dtype match in addition to same type? e.g. DatetimeTZBlock - # cannot necessarily join return ( # all blocks need to have the same type all(type(ju.block) is type(join_units[0].block) for ju in join_units) # noqa and + # e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform + all( + is_dtype_equal(ju.block.dtype, join_units[0].block.dtype) + for ju in join_units + ) + and # no blocks that would get missing values (can lead to type upcasts) # unless we're an extension dtype. 
all(not ju.is_na or ju.block.is_extension for ju in join_units) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 3c8d942554575..b688f1b4fea5f 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -35,7 +35,6 @@ from pandas.core.dtypes.cast import infer_dtype_from_scalar from pandas.core.dtypes.common import ( - DT64NS_DTYPE, ensure_int64, is_dtype_equal, is_extension_array_dtype, @@ -1639,7 +1638,7 @@ def external_values(self): def internal_values(self): """The array that Series._values returns""" - return self._block.internal_values() + return self._block.values def array_values(self): """The array that Series.array returns""" @@ -1794,17 +1793,11 @@ def _form_blocks( ) blocks.extend(numeric_blocks) - if len(items_dict["TimeDeltaBlock"]): - timedelta_blocks = _multi_blockify( - items_dict["TimeDeltaBlock"], consolidate=consolidate + if len(items_dict["DatetimeLikeBlock"]): + dtlike_blocks = _multi_blockify( + items_dict["DatetimeLikeBlock"], consolidate=consolidate ) - blocks.extend(timedelta_blocks) - - if len(items_dict["DatetimeBlock"]): - datetime_blocks = _simple_blockify( - items_dict["DatetimeBlock"], DT64NS_DTYPE, consolidate=consolidate - ) - blocks.extend(datetime_blocks) + blocks.extend(dtlike_blocks) if len(items_dict["DatetimeTZBlock"]): dttz_blocks = [ diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index bc1d4605e985a..dbb5cb357de47 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -336,7 +336,7 @@ def test_quantile_box(self): ) tm.assert_frame_equal(res, exp) - # DatetimeBlock may be consolidated and contain NaT in different loc + # DatetimeLikeBlock may be consolidated and contain NaT in different loc df = DataFrame( { "A": [ diff --git a/pandas/tests/internals/test_api.py b/pandas/tests/internals/test_api.py index 60fbd2da70e79..0062d5aa34319 100644 --- 
a/pandas/tests/internals/test_api.py +++ b/pandas/tests/internals/test_api.py @@ -27,11 +27,9 @@ def test_namespace(): expected = [ "Block", "NumericBlock", - "DatetimeBlock", "DatetimeTZBlock", "ExtensionBlock", "ObjectBlock", - "TimeDeltaBlock", "make_block", "DataManager", "ArrayManager", diff --git a/pandas/tests/series/methods/test_dropna.py b/pandas/tests/series/methods/test_dropna.py index 1c7c52d228cfa..5bff7306fac33 100644 --- a/pandas/tests/series/methods/test_dropna.py +++ b/pandas/tests/series/methods/test_dropna.py @@ -70,7 +70,7 @@ def test_dropna_period_dtype(self): tm.assert_series_equal(result, expected) def test_datetime64_tz_dropna(self): - # DatetimeBlock + # DatetimeLikeBlock ser = Series( [ Timestamp("2011-01-01 10:00"), @@ -85,7 +85,7 @@ def test_datetime64_tz_dropna(self): ) tm.assert_series_equal(result, expected) - # DatetimeBlockTZ + # DatetimeTZBlock idx = DatetimeIndex( ["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz="Asia/Tokyo" ) diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py index cf6b357d0a418..51864df915f8c 100644 --- a/pandas/tests/series/methods/test_fillna.py +++ b/pandas/tests/series/methods/test_fillna.py @@ -334,7 +334,7 @@ def test_datetime64_fillna(self): @pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"]) def test_datetime64_tz_fillna(self, tz): - # DatetimeBlock + # DatetimeLikeBlock ser = Series( [ Timestamp("2011-01-01 10:00"), @@ -414,7 +414,7 @@ def test_datetime64_tz_fillna(self, tz): tm.assert_series_equal(expected, result) tm.assert_series_equal(isna(ser), null_loc) - # DatetimeBlockTZ + # DatetimeTZBlock idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz) ser = Series(idx) assert ser.dtype == f"datetime64[ns, {tz}]"
sits on top of #40456
https://api.github.com/repos/pandas-dev/pandas/pulls/40614
2021-03-24T17:49:41Z
2021-04-02T15:18:44Z
2021-04-02T15:18:43Z
2021-04-06T18:24:00Z
TYP: get_indexer
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 310ee4c3a63e3..7f141531e0d71 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -13,6 +13,7 @@ Sequence, TypeVar, cast, + overload, ) import warnings @@ -159,6 +160,8 @@ ) if TYPE_CHECKING: + from typing import Literal + from pandas import ( CategoricalIndex, DataFrame, @@ -5193,7 +5196,8 @@ def set_value(self, arr, key, value): """ @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs) - def get_indexer_non_unique(self, target): + def get_indexer_non_unique(self, target) -> tuple[np.ndarray, np.ndarray]: + # both returned ndarrays are np.intp target = ensure_index(target) if not self._should_compare(target) and not is_interval_dtype(self.dtype): @@ -5217,7 +5221,7 @@ def get_indexer_non_unique(self, target): tgt_values = target._get_engine_target() indexer, missing = self._engine.get_indexer_non_unique(tgt_values) - return ensure_platform_int(indexer), missing + return ensure_platform_int(indexer), ensure_platform_int(missing) @final def get_indexer_for(self, target, **kwargs) -> np.ndarray: @@ -5237,8 +5241,31 @@ def get_indexer_for(self, target, **kwargs) -> np.ndarray: indexer, _ = self.get_indexer_non_unique(target) return indexer + @overload + def _get_indexer_non_comparable( + self, target: Index, method, unique: Literal[True] = ... + ) -> np.ndarray: + # returned ndarray is np.intp + ... + + @overload + def _get_indexer_non_comparable( + self, target: Index, method, unique: Literal[False] + ) -> tuple[np.ndarray, np.ndarray]: + # both returned ndarrays are np.intp + ... + + @overload + def _get_indexer_non_comparable( + self, target: Index, method, unique: bool = True + ) -> np.ndarray | tuple[np.ndarray, np.ndarray]: + # any returned ndarrays are np.intp + ... 
+ @final - def _get_indexer_non_comparable(self, target: Index, method, unique: bool = True): + def _get_indexer_non_comparable( + self, target: Index, method, unique: bool = True + ) -> np.ndarray | tuple[np.ndarray, np.ndarray]: """ Called from get_indexer or get_indexer_non_unique when the target is of a non-comparable dtype. diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 5b98b956e33e6..b20926ee85a3d 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -508,6 +508,7 @@ def _get_indexer( limit: int | None = None, tolerance=None, ) -> np.ndarray: + # returned ndarray is np.intp if self.equals(target): return np.arange(len(self), dtype="intp") @@ -515,11 +516,15 @@ def _get_indexer( return self._get_indexer_non_unique(target._values)[0] @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs) - def get_indexer_non_unique(self, target): + def get_indexer_non_unique(self, target) -> tuple[np.ndarray, np.ndarray]: + # both returned ndarrays are np.intp target = ibase.ensure_index(target) return self._get_indexer_non_unique(target._values) - def _get_indexer_non_unique(self, values: ArrayLike): + def _get_indexer_non_unique( + self, values: ArrayLike + ) -> tuple[np.ndarray, np.ndarray]: + # both returned ndarrays are np.intp """ get_indexer_non_unique but after unrapping the target Index object. 
""" @@ -538,7 +543,7 @@ def _get_indexer_non_unique(self, values: ArrayLike): codes = self.categories.get_indexer(values) indexer, missing = self._engine.get_indexer_non_unique(codes) - return ensure_platform_int(indexer), missing + return ensure_platform_int(indexer), ensure_platform_int(missing) @doc(Index._convert_list_indexer) def _convert_list_indexer(self, keyarr): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 171ab57264f85..d7b5f66bd385f 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -721,6 +721,7 @@ def _get_indexer( limit: int | None = None, tolerance: Any | None = None, ) -> np.ndarray: + # returned ndarray is np.intp if isinstance(target, IntervalIndex): # equal indexes -> 1:1 positional match @@ -753,6 +754,7 @@ def _get_indexer( @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs) def get_indexer_non_unique(self, target: Index) -> tuple[np.ndarray, np.ndarray]: + # both returned ndarrays are np.intp target = ensure_index(target) if isinstance(target, IntervalIndex) and not self._should_compare(target): @@ -772,6 +774,7 @@ def get_indexer_non_unique(self, target: Index) -> tuple[np.ndarray, np.ndarray] return ensure_platform_int(indexer), ensure_platform_int(missing) def _get_indexer_pointwise(self, target: Index) -> tuple[np.ndarray, np.ndarray]: + # both returned ndarrays are np.intp """ pointwise implementation for get_indexer and get_indexer_non_unique. """
@simonjayhawkins im at a loss on why im getting a mypy complaint: ``` error: Overloaded function signatures 1 and 2 overlap with incompatible return types [misc] ``` AFAICT they should be compatible. Am I missing something obvious?
https://api.github.com/repos/pandas-dev/pandas/pulls/40612
2021-03-24T15:42:31Z
2021-04-19T18:54:32Z
2021-04-19T18:54:32Z
2021-04-19T19:00:57Z
PERF: increase the minimum number of elements to use numexpr for ops from 1e4 to 1e6
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index ae1928b8535f9..8205534c9d48b 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -38,7 +38,7 @@ } # the minimum prod shape that we will use numexpr -_MIN_ELEMENTS = 10000 +_MIN_ELEMENTS = 1_000_000 def set_use_numexpr(v=True): diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 43b119e7e1087..e94cb23b359d0 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -48,9 +48,7 @@ class TestExpressions: def setup_method(self, method): self.frame = _frame.copy() - self.array = _array.copy() self.frame2 = _frame2.copy() - self.array2 = _array2.copy() self.mixed = _mixed.copy() self.mixed2 = _mixed2.copy() self._MIN_ELEMENTS = expr._MIN_ELEMENTS @@ -138,23 +136,19 @@ def test_arithmetic(self, df, flex): self.run_frame(df, df, flex) def test_invalid(self): + array = np.random.randn(1_000_001) + array2 = np.random.randn(100) # no op - result = expr._can_use_numexpr( - operator.add, None, self.array, self.array, "evaluate" - ) + result = expr._can_use_numexpr(operator.add, None, array, array, "evaluate") assert not result # min elements - result = expr._can_use_numexpr( - operator.add, "+", self.array2, self.array2, "evaluate" - ) + result = expr._can_use_numexpr(operator.add, "+", array2, array2, "evaluate") assert not result # ok, we only check on first part of expression - result = expr._can_use_numexpr( - operator.add, "+", self.array, self.array2, "evaluate" - ) + result = expr._can_use_numexpr(operator.add, "+", array, array2, "evaluate") assert result @pytest.mark.parametrize(
Closes #40500, see that issue for the analysis of speedup vs size. Another take of https://github.com/pandas-dev/pandas/pull/40502, but hopefully now without crashing the tests because of increased memory use.
https://api.github.com/repos/pandas-dev/pandas/pulls/40609
2021-03-24T08:42:22Z
2021-03-24T14:08:52Z
2021-03-24T14:08:51Z
2021-03-24T14:10:22Z
REGR: replace with multivalued regex raising
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index 45d131327630e..26d768f830830 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -18,6 +18,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.sum` when ``min_count`` greater than the :class:`DataFrame` shape was passed resulted in a ``ValueError`` (:issue:`39738`) - Fixed regression in :meth:`DataFrame.to_json` raising ``AttributeError`` when run on PyPy (:issue:`39837`) - Fixed regression in :meth:`DataFrame.where` not returning a copy in the case of an all True condition (:issue:`39595`) +- Fixed regression in :meth:`DataFrame.replace` raising ``IndexError`` when ``regex`` was a multi-key dictionary (:issue:`39338`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 7d8dcb34ed582..c177618827edb 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -801,10 +801,20 @@ def _replace_list( rb = [self if inplace else self.copy()] for i, (src, dest) in enumerate(pairs): + convert = i == src_len # only convert once at the end new_rb: List[Block] = [] - for blk in rb: - m = masks[i] - convert = i == src_len # only convert once at the end + + # GH-39338: _replace_coerce can split a block into + # single-column blocks, so track the index so we know + # where to index into the mask + for blk_num, blk in enumerate(rb): + if len(rb) == 1: + m = masks[i] + else: + mib = masks[i] + assert not isinstance(mib, bool) + m = mib[blk_num : blk_num + 1] + result = blk._replace_coerce( to_replace=src, value=dest, diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 56750da7c90b2..d8f93f047e74b 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -654,6 +654,28 @@ def 
test_regex_replace_numeric_to_object_conversion(self, mix_abc): tm.assert_frame_equal(res, expec) assert res.a.dtype == np.object_ + @pytest.mark.parametrize( + "to_replace", [{"": np.nan, ",": ""}, {",": "", "": np.nan}] + ) + def test_joint_simple_replace_and_regex_replace(self, to_replace): + # GH-39338 + df = DataFrame( + { + "col1": ["1,000", "a", "3"], + "col2": ["a", "", "b"], + "col3": ["a", "b", "c"], + } + ) + result = df.replace(regex=to_replace) + expected = DataFrame( + { + "col1": ["1000", "a", "3"], + "col2": ["a", np.nan, "b"], + "col3": ["a", "b", "c"], + } + ) + tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize("metachar", ["[]", "()", r"\d", r"\w", r"\s"]) def test_replace_regex_metachar(self, metachar): df = DataFrame({"a": [metachar, "else"]})
- [x] closes #39338 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Issue comes from the first pair in the `regex` arg hitting the simple replace path, which causes the `ObjectBlock` to be split. From here, the mask indexing will be wrong, but that will not cause an error unless a later pair hits `_replace_regex`, which actually uses the mask.
https://api.github.com/repos/pandas-dev/pandas/pulls/40604
2021-03-24T02:29:31Z
2021-03-25T01:51:54Z
2021-03-25T01:51:54Z
2021-03-26T11:24:25Z
ENH: Categorical.empty
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index d4e5ca00b06dd..e97687de34273 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -15,6 +15,7 @@ F, PositionalIndexer2D, Shape, + type_t, ) from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError @@ -28,6 +29,7 @@ ) from pandas.core.dtypes.common import is_dtype_equal +from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.missing import array_equivalent from pandas.core import missing @@ -465,3 +467,24 @@ def value_counts(self, dropna: bool = True): index_arr = self._from_backing_data(np.asarray(result.index._data)) index = Index(index_arr, name=result.index.name) return Series(result._values, index=index, name=result.name) + + # ------------------------------------------------------------------------ + # numpy-like methods + + @classmethod + def _empty( + cls: type_t[NDArrayBackedExtensionArrayT], shape: Shape, dtype: ExtensionDtype + ) -> NDArrayBackedExtensionArrayT: + """ + Analogous to np.empty(shape, dtype=dtype) + + Parameters + ---------- + shape : tuple[int] + dtype : ExtensionDtype + """ + # The base implementation uses a naive approach to find the dtype + # for the backing ndarray + arr = cls._from_sequence([], dtype=dtype) + backing = np.empty(shape, dtype=arr._ndarray.dtype) + return arr._from_backing_data(backing) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 933b829e0b29f..02731bd4fbbc1 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1304,6 +1304,21 @@ def delete(self: ExtensionArrayT, loc) -> ExtensionArrayT: indexer = np.delete(np.arange(len(self)), loc) return self.take(indexer) + @classmethod + def _empty(cls, shape: Shape, dtype: ExtensionDtype): + """ + Create an ExtensionArray with the given shape and dtype. 
+ """ + obj = cls._from_sequence([], dtype=dtype) + + taker = np.broadcast_to(np.intp(-1), shape) + result = obj.take(taker, allow_fill=True) + if not isinstance(result, cls) or dtype != result.dtype: + raise NotImplementedError( + f"Default 'empty' implementation is invalid for dtype='{dtype}'" + ) + return result + class ExtensionOpsMixin: """ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index ad4c3181424e2..f2b5ad447a0cf 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -30,6 +30,8 @@ NpDtype, Ordered, Scalar, + Shape, + type_t, ) from pandas.compat.numpy import function as nv from pandas.util._decorators import ( @@ -1525,6 +1527,30 @@ def value_counts(self, dropna: bool = True): return Series(count, index=CategoricalIndex(ix), dtype="int64") + # error: Argument 2 of "_empty" is incompatible with supertype + # "NDArrayBackedExtensionArray"; supertype defines the argument type as + # "ExtensionDtype" + @classmethod + def _empty( # type: ignore[override] + cls: type_t[Categorical], shape: Shape, dtype: CategoricalDtype + ) -> Categorical: + """ + Analogous to np.empty(shape, dtype=dtype) + + Parameters + ---------- + shape : tuple[int] + dtype : CategoricalDtype + """ + arr = cls._from_sequence([], dtype=dtype) + + # We have to use np.zeros instead of np.empty otherwise the resulting + # ndarray may contain codes not supported by this dtype, in which + # case repr(result) could segfault. + backing = np.zeros(shape, dtype=arr._ndarray.dtype) + + return arr._from_backing_data(backing) + def _internal_get_values(self): """ Return the values. 
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index c4b70fa9613bf..600aacec9c87a 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -254,6 +254,12 @@ def _from_sequence_of_strings( ): return cls._from_sequence(strings, dtype=dtype, copy=copy) + @classmethod + def _empty(cls, shape, dtype) -> StringArray: + values = np.empty(shape, dtype=object) + values[:] = libmissing.NA + return cls(values).astype(dtype, copy=False) + def __arrow_array__(self, type=None): """ Convert myself into a pyarrow Array. diff --git a/pandas/tests/arrays/test_ndarray_backed.py b/pandas/tests/arrays/test_ndarray_backed.py new file mode 100644 index 0000000000000..c48fb7e78d45b --- /dev/null +++ b/pandas/tests/arrays/test_ndarray_backed.py @@ -0,0 +1,75 @@ +""" +Tests for subclasses of NDArrayBackedExtensionArray +""" +import numpy as np + +from pandas import ( + CategoricalIndex, + date_range, +) +from pandas.core.arrays import ( + Categorical, + DatetimeArray, + PandasArray, + TimedeltaArray, +) + + +class TestEmpty: + def test_empty_categorical(self): + ci = CategoricalIndex(["a", "b", "c"], ordered=True) + dtype = ci.dtype + + # case with int8 codes + shape = (4,) + result = Categorical._empty(shape, dtype=dtype) + assert isinstance(result, Categorical) + assert result.shape == shape + assert result._ndarray.dtype == np.int8 + + # case where repr would segfault if we didn't override base implementation + result = Categorical._empty((4096,), dtype=dtype) + assert isinstance(result, Categorical) + assert result.shape == (4096,) + assert result._ndarray.dtype == np.int8 + repr(result) + + # case with int16 codes + ci = CategoricalIndex(list(range(512)) * 4, ordered=False) + dtype = ci.dtype + result = Categorical._empty(shape, dtype=dtype) + assert isinstance(result, Categorical) + assert result.shape == shape + assert result._ndarray.dtype == np.int16 + + def test_empty_dt64tz(self): + dti = date_range("2016-01-01", periods=2, 
tz="Asia/Tokyo") + dtype = dti.dtype + + shape = (0,) + result = DatetimeArray._empty(shape, dtype=dtype) + assert result.dtype == dtype + assert isinstance(result, DatetimeArray) + assert result.shape == shape + + def test_empty_dt64(self): + shape = (3, 9) + result = DatetimeArray._empty(shape, dtype="datetime64[ns]") + assert isinstance(result, DatetimeArray) + assert result.shape == shape + + def test_empty_td64(self): + shape = (3, 9) + result = TimedeltaArray._empty(shape, dtype="m8[ns]") + assert isinstance(result, TimedeltaArray) + assert result.shape == shape + + def test_empty_pandas_array(self): + arr = PandasArray(np.array([1, 2])) + dtype = arr.dtype + + shape = (3, 9) + result = PandasArray._empty(shape, dtype=dtype) + assert isinstance(result, PandasArray) + assert result.dtype == dtype + assert result.shape == shape diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py index 829be279b45d3..6a16433aa0a32 100644 --- a/pandas/tests/extension/arrow/test_bool.py +++ b/pandas/tests/extension/arrow/test_bool.py @@ -82,6 +82,10 @@ def test_series_constructor_scalar_na_with_index(self, dtype, na_value): def test_construct_empty_dataframe(self, dtype): super().test_construct_empty_dataframe(dtype) + @pytest.mark.xfail(reason="_from_sequence ignores dtype keyword") + def test_empty(self, dtype): + super().test_empty(dtype) + class TestReduce(base.BaseNoReduceTests): def test_reduce_series_boolean(self): diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py index e2323620daa0e..56c3f8216f033 100644 --- a/pandas/tests/extension/base/constructors.py +++ b/pandas/tests/extension/base/constructors.py @@ -122,3 +122,10 @@ def test_construct_empty_dataframe(self, dtype): {"a": pd.array([], dtype=dtype)}, index=pd.Index([], dtype="object") ) self.assert_frame_equal(result, expected) + + def test_empty(self, dtype): + cls = dtype.construct_array_type() + result = 
cls._empty((4,), dtype=dtype) + + assert isinstance(result, cls) + assert result.dtype == dtype diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index 3f1f2c02c79f7..ea8b1cfb738f5 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -117,7 +117,14 @@ def test_contains(self, data, data_missing): class TestConstructors(base.BaseConstructorsTests): - pass + def test_empty(self, dtype): + cls = dtype.construct_array_type() + result = cls._empty((4,), dtype=dtype) + + assert isinstance(result, cls) + # the dtype we passed is not initialized, so will not match the + # dtype on our result. + assert result.dtype == CategoricalDtype([]) class TestReshaping(base.BaseReshapingTests):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry xref #39776 xref https://github.com/dask/fastparquet/issues/576#issuecomment-805220721
https://api.github.com/repos/pandas-dev/pandas/pulls/40602
2021-03-23T23:28:00Z
2021-04-13T14:17:30Z
2021-04-13T14:17:29Z
2021-04-13T14:25:57Z
CLN libjoin int64->intp
diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index 7888a15a7cb26..b69b89c0de019 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -231,7 +231,7 @@ cdef ndarray[intp_t] _get_result_indexer( return res -def ffill_indexer(const intp_t[:] indexer): +def ffill_indexer(const intp_t[:] indexer) -> np.ndarray: cdef: Py_ssize_t i, n = len(indexer) ndarray[intp_t] result @@ -275,7 +275,7 @@ ctypedef fused join_t: def left_join_indexer_unique(ndarray[join_t] left, ndarray[join_t] right): cdef: Py_ssize_t i, j, nleft, nright - ndarray[int64_t] indexer + ndarray[intp_t] indexer join_t lval, rval i = 0 @@ -283,7 +283,7 @@ def left_join_indexer_unique(ndarray[join_t] left, ndarray[join_t] right): nleft = len(left) nright = len(right) - indexer = np.empty(nleft, dtype=np.int64) + indexer = np.empty(nleft, dtype=np.intp) while True: if i == nleft: break @@ -324,7 +324,7 @@ def left_join_indexer(ndarray[join_t] left, ndarray[join_t] right): cdef: Py_ssize_t i, j, k, nright, nleft, count join_t lval, rval - ndarray[int64_t] lindexer, rindexer + ndarray[intp_t] lindexer, rindexer ndarray[join_t] result nleft = len(left) @@ -366,8 +366,8 @@ def left_join_indexer(ndarray[join_t] left, ndarray[join_t] right): # do it again now that result size is known - lindexer = np.empty(count, dtype=np.int64) - rindexer = np.empty(count, dtype=np.int64) + lindexer = np.empty(count, dtype=np.intp) + rindexer = np.empty(count, dtype=np.intp) result = np.empty(count, dtype=left.dtype) i = 0 @@ -427,7 +427,7 @@ def inner_join_indexer(ndarray[join_t] left, ndarray[join_t] right): cdef: Py_ssize_t i, j, k, nright, nleft, count join_t lval, rval - ndarray[int64_t] lindexer, rindexer + ndarray[intp_t] lindexer, rindexer ndarray[join_t] result nleft = len(left) @@ -468,8 +468,8 @@ def inner_join_indexer(ndarray[join_t] left, ndarray[join_t] right): # do it again now that result size is known - lindexer = np.empty(count, dtype=np.int64) - rindexer = np.empty(count, dtype=np.int64) 
+ lindexer = np.empty(count, dtype=np.intp) + rindexer = np.empty(count, dtype=np.intp) result = np.empty(count, dtype=left.dtype) i = 0 @@ -517,7 +517,7 @@ def outer_join_indexer(ndarray[join_t] left, ndarray[join_t] right): cdef: Py_ssize_t i, j, nright, nleft, count join_t lval, rval - ndarray[int64_t] lindexer, rindexer + ndarray[intp_t] lindexer, rindexer ndarray[join_t] result nleft = len(left) @@ -564,8 +564,8 @@ def outer_join_indexer(ndarray[join_t] left, ndarray[join_t] right): count += 1 j += 1 - lindexer = np.empty(count, dtype=np.int64) - rindexer = np.empty(count, dtype=np.int64) + lindexer = np.empty(count, dtype=np.intp) + rindexer = np.empty(count, dtype=np.intp) result = np.empty(count, dtype=left.dtype) # do it again, but populate the indexers / result @@ -673,12 +673,12 @@ def asof_join_backward_on_X_by_Y(asof_t[:] left_values, asof_t[:] right_values, by_t[:] left_by_values, by_t[:] right_by_values, - bint allow_exact_matches=1, + bint allow_exact_matches=True, tolerance=None): cdef: Py_ssize_t left_pos, right_pos, left_size, right_size, found_right_pos - ndarray[int64_t] left_indexer, right_indexer + ndarray[intp_t] left_indexer, right_indexer bint has_tolerance = False asof_t tolerance_ = 0 asof_t diff = 0 @@ -693,8 +693,8 @@ def asof_join_backward_on_X_by_Y(asof_t[:] left_values, left_size = len(left_values) right_size = len(right_values) - left_indexer = np.empty(left_size, dtype=np.int64) - right_indexer = np.empty(left_size, dtype=np.int64) + left_indexer = np.empty(left_size, dtype=np.intp) + right_indexer = np.empty(left_size, dtype=np.intp) if by_t is object: hash_table = PyObjectHashTable(right_size) @@ -747,7 +747,7 @@ def asof_join_forward_on_X_by_Y(asof_t[:] left_values, cdef: Py_ssize_t left_pos, right_pos, left_size, right_size, found_right_pos - ndarray[int64_t] left_indexer, right_indexer + ndarray[intp_t] left_indexer, right_indexer bint has_tolerance = False asof_t tolerance_ = 0 asof_t diff = 0 @@ -762,8 +762,8 @@ def 
asof_join_forward_on_X_by_Y(asof_t[:] left_values, left_size = len(left_values) right_size = len(right_values) - left_indexer = np.empty(left_size, dtype=np.int64) - right_indexer = np.empty(left_size, dtype=np.int64) + left_indexer = np.empty(left_size, dtype=np.intp) + right_indexer = np.empty(left_size, dtype=np.intp) if by_t is object: hash_table = PyObjectHashTable(right_size) @@ -816,14 +816,14 @@ def asof_join_nearest_on_X_by_Y(asof_t[:] left_values, cdef: Py_ssize_t left_size, right_size, i - ndarray[int64_t] left_indexer, right_indexer, bli, bri, fli, fri + ndarray[intp_t] left_indexer, right_indexer, bli, bri, fli, fri asof_t bdiff, fdiff left_size = len(left_values) right_size = len(right_values) - left_indexer = np.empty(left_size, dtype=np.int64) - right_indexer = np.empty(left_size, dtype=np.int64) + left_indexer = np.empty(left_size, dtype=np.intp) + right_indexer = np.empty(left_size, dtype=np.intp) # search both forward and backward bli, bri = asof_join_backward_on_X_by_Y( @@ -867,7 +867,7 @@ def asof_join_backward(asof_t[:] left_values, cdef: Py_ssize_t left_pos, right_pos, left_size, right_size - ndarray[int64_t] left_indexer, right_indexer + ndarray[intp_t] left_indexer, right_indexer bint has_tolerance = False asof_t tolerance_ = 0 asof_t diff = 0 @@ -880,8 +880,8 @@ def asof_join_backward(asof_t[:] left_values, left_size = len(left_values) right_size = len(right_values) - left_indexer = np.empty(left_size, dtype=np.int64) - right_indexer = np.empty(left_size, dtype=np.int64) + left_indexer = np.empty(left_size, dtype=np.intp) + right_indexer = np.empty(left_size, dtype=np.intp) right_pos = 0 for left_pos in range(left_size): @@ -920,7 +920,7 @@ def asof_join_forward(asof_t[:] left_values, cdef: Py_ssize_t left_pos, right_pos, left_size, right_size - ndarray[int64_t] left_indexer, right_indexer + ndarray[intp_t] left_indexer, right_indexer bint has_tolerance = False asof_t tolerance_ = 0 asof_t diff = 0 @@ -933,8 +933,8 @@ def 
asof_join_forward(asof_t[:] left_values, left_size = len(left_values) right_size = len(right_values) - left_indexer = np.empty(left_size, dtype=np.int64) - right_indexer = np.empty(left_size, dtype=np.int64) + left_indexer = np.empty(left_size, dtype=np.intp) + right_indexer = np.empty(left_size, dtype=np.intp) right_pos = right_size - 1 for left_pos in range(left_size - 1, -1, -1): @@ -974,14 +974,14 @@ def asof_join_nearest(asof_t[:] left_values, cdef: Py_ssize_t left_size, right_size, i - ndarray[int64_t] left_indexer, right_indexer, bli, bri, fli, fri + ndarray[intp_t] left_indexer, right_indexer, bli, bri, fli, fri asof_t bdiff, fdiff left_size = len(left_values) right_size = len(right_values) - left_indexer = np.empty(left_size, dtype=np.int64) - right_indexer = np.empty(left_size, dtype=np.int64) + left_indexer = np.empty(left_size, dtype=np.intp) + right_indexer = np.empty(left_size, dtype=np.intp) # search both forward and backward bli, bri = asof_join_backward(left_values, right_values, diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 094f4a67d2e61..af3315dd2ade6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3925,7 +3925,7 @@ def join( if len(other) == 0 and how in ("left", "outer"): join_index = self._view() if return_indexers: - rindexer = np.repeat(-1, len(join_index)) + rindexer = np.repeat(np.intp(-1), len(join_index)) return join_index, None, rindexer else: return join_index @@ -3933,7 +3933,7 @@ def join( if len(self) == 0 and how in ("right", "outer"): join_index = other._view() if return_indexers: - lindexer = np.repeat(-1, len(join_index)) + lindexer = np.repeat(np.intp(-1), len(join_index)) return join_index, lindexer, None else: return join_index diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 0cb9725b70f44..a9faf0098b6d4 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -28,6 +28,7 @@ ) from pandas._typing import ( 
ArrayLike, + DtypeObj, FrameOrSeries, FrameOrSeriesUnion, IndexLabel, @@ -286,7 +287,7 @@ def merge_ordered( 9 e 3 b 3.0 """ - def _merger(x, y): + def _merger(x, y) -> DataFrame: # perform the ordered merge operation op = _OrderedMerge( x, @@ -741,7 +742,9 @@ def get_result(self) -> DataFrame: return result.__finalize__(self, method="merge") - def _maybe_drop_cross_column(self, result: DataFrame, cross_col: Optional[str]): + def _maybe_drop_cross_column( + self, result: DataFrame, cross_col: Optional[str] + ) -> None: if cross_col is not None: result.drop(columns=cross_col, inplace=True) @@ -824,7 +827,12 @@ def _maybe_restore_index_levels(self, result: DataFrame) -> None: if names_to_restore: result.set_index(names_to_restore, inplace=True) - def _maybe_add_join_keys(self, result, left_indexer, right_indexer): + def _maybe_add_join_keys( + self, + result: DataFrame, + left_indexer: Optional[np.ndarray], + right_indexer: Optional[np.ndarray], + ) -> None: left_has_missing = None right_has_missing = None @@ -891,9 +899,14 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer): # make sure to just use the right values or vice-versa mask_left = left_indexer == -1 mask_right = right_indexer == -1 - if mask_left.all(): + # error: Item "bool" of "Union[Any, bool]" has no attribute "all" + if mask_left.all(): # type: ignore[union-attr] key_col = Index(rvals) - elif right_indexer is not None and mask_right.all(): + # error: Item "bool" of "Union[Any, bool]" has no attribute "all" + elif ( + right_indexer is not None + and mask_right.all() # type: ignore[union-attr] + ): key_col = Index(lvals) else: key_col = Index(lvals).where(~mask_left, rvals) @@ -916,13 +929,17 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer): else: result.insert(i, name or f"key_{i}", key_col) - def _get_join_indexers(self): + def _get_join_indexers(self) -> tuple[np.ndarray, np.ndarray]: """ return the join indexers """ + # Both returned ndarrays are np.intp 
return get_join_indexers( self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how ) - def _get_join_info(self): + def _get_join_info( + self, + ) -> tuple[Index, np.ndarray | None, np.ndarray | None]: + # Both returned ndarrays are np.intp (if not None) left_ax = self.left.axes[self.axis] right_ax = self.right.axes[self.axis] @@ -930,6 +947,7 @@ def _get_join_info(self): join_index, left_indexer, right_indexer = left_ax.join( right_ax, how=self.how, return_indexers=True, sort=self.sort ) + elif self.right_index and self.how == "left": join_index, left_indexer, right_indexer = _left_join_on_index( left_ax, right_ax, self.left_join_keys, sort=self.sort @@ -952,7 +970,7 @@ def _get_join_info(self): ) else: join_index = self.right.index.take(right_indexer) - left_indexer = np.array([-1] * len(join_index)) + left_indexer = np.array([-1] * len(join_index), dtype=np.intp) elif self.left_index: if len(self.right) > 0: join_index = self._create_join_index( @@ -963,7 +981,7 @@ def _get_join_info(self): ) else: join_index = self.left.index.take(left_indexer) - right_indexer = np.array([-1] * len(join_index)) + right_indexer = np.array([-1] * len(join_index), dtype=np.intp) else: join_index = Index(np.arange(len(left_indexer))) @@ -975,7 +993,7 @@ def _create_join_index( self, index: Index, other_index: Index, - indexer, + indexer: np.ndarray, how: str = "left", ) -> Index: """ @@ -983,14 +1001,15 @@ def _create_join_index( Parameters ---------- - index: Index being rearranged - other_index: Index used to supply values not found in index - indexer: how to rearrange index - how: replacement is only necessary if indexer based on other_index + index : Index being rearranged + other_index : Index used to supply values not found in index + indexer : np.ndarray[np.intp] how to rearrange index + how : str + Replacement is only necessary if indexer based on other_index. 
Returns ------- - join_index + Index """ if self.how in (how, "outer") and not isinstance(other_index, MultiIndex): # if final index requires values in other_index but not target @@ -1263,8 +1282,8 @@ def _create_cross_configuration( Parameters ---------- - left: DataFrame - right DataFrame + left : DataFrame + right : DataFrame Returns ------- @@ -1419,21 +1438,22 @@ def _validate(self, validate: str) -> None: def get_join_indexers( left_keys, right_keys, sort: bool = False, how: str = "inner", **kwargs -): +) -> tuple[np.ndarray, np.ndarray]: """ Parameters ---------- - left_keys: ndarray, Index, Series - right_keys: ndarray, Index, Series - sort: bool, default False - how: string {'inner', 'outer', 'left', 'right'}, default 'inner' + left_keys : ndarray, Index, Series + right_keys : ndarray, Index, Series + sort : bool, default False + how : {'inner', 'outer', 'left', 'right'}, default 'inner' Returns ------- - tuple of (left_indexer, right_indexer) - indexers into the left_keys, right_keys - + np.ndarray[np.intp] + Indexer into the left_keys. + np.ndarray[np.intp] + Indexer into the right_keys. 
""" assert len(left_keys) == len( right_keys @@ -1499,9 +1519,9 @@ def restore_dropped_levels_multijoin( join_index : Index the index of the join between the common levels of left and right - lindexer : intp array + lindexer : np.ndarray[np.intp] left indexer - rindexer : intp array + rindexer : np.ndarray[np.intp] right indexer Returns @@ -1515,7 +1535,7 @@ def restore_dropped_levels_multijoin( """ - def _convert_to_multiindex(index) -> MultiIndex: + def _convert_to_multiindex(index: Index) -> MultiIndex: if isinstance(index, MultiIndex): return index else: @@ -1649,7 +1669,7 @@ def _asof_by_function(direction: str): } -def _get_cython_type_upcast(dtype) -> str: +def _get_cython_type_upcast(dtype: DtypeObj) -> str: """ Upcast a dtype to 'int64_t', 'double', or 'object' """ if is_integer_dtype(dtype): return "int64_t" @@ -1706,7 +1726,7 @@ def __init__( fill_method=fill_method, ) - def _validate_specification(self): + def _validate_specification(self) -> None: super()._validate_specification() # we only allow on to be a single item for on @@ -1839,7 +1859,8 @@ def _get_merge_keys(self): return left_join_keys, right_join_keys, join_names - def _get_join_indexers(self): + def _get_join_indexers(self) -> tuple[np.ndarray, np.ndarray]: + # Both returned ndarrays are np.intp """ return the join indexers """ def flip(xs) -> np.ndarray: @@ -1929,7 +1950,10 @@ def flip(xs) -> np.ndarray: return func(left_values, right_values, self.allow_exact_matches, tolerance) -def _get_multiindex_indexer(join_keys, index: MultiIndex, sort: bool): +def _get_multiindex_indexer( + join_keys, index: MultiIndex, sort: bool +) -> tuple[np.ndarray, np.ndarray]: + # Both returned ndarrays are np.intp # left & right join labels and num. 
of levels at each location mapped = ( @@ -1965,17 +1989,19 @@ def _get_multiindex_indexer(join_keys, index: MultiIndex, sort: bool): return libjoin.left_outer_join(lkey, rkey, count, sort=sort) -def _get_single_indexer(join_key, index, sort: bool = False): - left_key, right_key, count = _factorize_keys(join_key, index, sort=sort) - - left_indexer, right_indexer = libjoin.left_outer_join( - left_key, right_key, count, sort=sort - ) +def _get_single_indexer( + join_key, index: Index, sort: bool = False +) -> tuple[np.ndarray, np.ndarray]: + # Both returned ndarrays are np.intp + left_key, right_key, count = _factorize_keys(join_key, index._values, sort=sort) - return left_indexer, right_indexer + return libjoin.left_outer_join(left_key, right_key, count, sort=sort) -def _left_join_on_index(left_ax: Index, right_ax: Index, join_keys, sort: bool = False): +def _left_join_on_index( + left_ax: Index, right_ax: Index, join_keys, sort: bool = False +) -> tuple[Index, np.ndarray | None, np.ndarray]: + # Both returned ndarrays are np.intp (if not None) if len(join_keys) > 1: if not ( isinstance(right_ax, MultiIndex) and len(join_keys) == right_ax.nlevels @@ -2212,7 +2238,9 @@ def _validate_operand(obj: FrameOrSeries) -> DataFrame: ) -def _items_overlap_with_suffix(left: Index, right: Index, suffixes: Suffixes): +def _items_overlap_with_suffix( + left: Index, right: Index, suffixes: Suffixes +) -> tuple[Index, Index]: """ Suffixes type validation. 
diff --git a/pandas/tests/indexes/period/test_join.py b/pandas/tests/indexes/period/test_join.py index aa2393aceee52..77dcd38b239ec 100644 --- a/pandas/tests/indexes/period/test_join.py +++ b/pandas/tests/indexes/period/test_join.py @@ -17,8 +17,8 @@ def test_join_outer_indexer(self): result = pi._outer_indexer(pi._values, pi._values) tm.assert_extension_array_equal(result[0], pi._values) - tm.assert_numpy_array_equal(result[1], np.arange(len(pi), dtype=np.int64)) - tm.assert_numpy_array_equal(result[2], np.arange(len(pi), dtype=np.int64)) + tm.assert_numpy_array_equal(result[1], np.arange(len(pi), dtype=np.intp)) + tm.assert_numpy_array_equal(result[2], np.arange(len(pi), dtype=np.intp)) def test_joins(self, join_type): index = period_range("1/1/2000", "1/20/2000", freq="D") diff --git a/pandas/tests/libs/test_join.py b/pandas/tests/libs/test_join.py index eeb66f8941260..17601d30739e3 100644 --- a/pandas/tests/libs/test_join.py +++ b/pandas/tests/libs/test_join.py @@ -26,23 +26,23 @@ def test_outer_join_indexer(self, dtype): assert isinstance(lindexer, np.ndarray) assert isinstance(rindexer, np.ndarray) tm.assert_numpy_array_equal(result, np.arange(5, dtype=dtype)) - exp = np.array([0, 1, 2, -1, -1], dtype=np.int64) + exp = np.array([0, 1, 2, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(lindexer, exp) - exp = np.array([-1, -1, 0, 1, 2], dtype=np.int64) + exp = np.array([-1, -1, 0, 1, 2], dtype=np.intp) tm.assert_numpy_array_equal(rindexer, exp) result, lindexer, rindexer = indexer(empty, right) tm.assert_numpy_array_equal(result, right) - exp = np.array([-1, -1, -1], dtype=np.int64) + exp = np.array([-1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(lindexer, exp) - exp = np.array([0, 1, 2], dtype=np.int64) + exp = np.array([0, 1, 2], dtype=np.intp) tm.assert_numpy_array_equal(rindexer, exp) result, lindexer, rindexer = indexer(left, empty) tm.assert_numpy_array_equal(result, left) - exp = np.array([0, 1, 2], dtype=np.int64) + exp = np.array([0, 1, 
2], dtype=np.intp) tm.assert_numpy_array_equal(lindexer, exp) - exp = np.array([-1, -1, -1], dtype=np.int64) + exp = np.array([-1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(rindexer, exp) def test_cython_left_outer_join(self): @@ -148,7 +148,7 @@ def test_left_join_indexer_unique(readonly): b.setflags(write=False) result = libjoin.left_join_indexer_unique(b, a) - expected = np.array([1, 1, 2, 3, 3], dtype=np.int64) + expected = np.array([1, 1, 2, 3, 3], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) @@ -283,8 +283,8 @@ def test_inner_join_indexer(): index_exp = np.array([3, 5], dtype=np.int64) tm.assert_almost_equal(index, index_exp) - aexp = np.array([2, 4], dtype=np.int64) - bexp = np.array([1, 2], dtype=np.int64) + aexp = np.array([2, 4], dtype=np.intp) + bexp = np.array([1, 2], dtype=np.intp) tm.assert_almost_equal(ares, aexp) tm.assert_almost_equal(bres, bexp) @@ -293,8 +293,8 @@ def test_inner_join_indexer(): index, ares, bres = libjoin.inner_join_indexer(a, b) tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64)) - tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64)) - tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64)) + tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.intp)) + tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.intp)) def test_outer_join_indexer(): @@ -306,8 +306,8 @@ def test_outer_join_indexer(): index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64) tm.assert_almost_equal(index, index_exp) - aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int64) - bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4], dtype=np.int64) + aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.intp) + bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) tm.assert_almost_equal(ares, aexp) tm.assert_almost_equal(bres, bexp) @@ -316,8 +316,8 @@ def test_outer_join_indexer(): index, ares, bres = libjoin.outer_join_indexer(a, b) tm.assert_numpy_array_equal(index, np.array([5], 
dtype=np.int64)) - tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64)) - tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64)) + tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.intp)) + tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.intp)) def test_left_join_indexer(): @@ -328,8 +328,8 @@ def test_left_join_indexer(): tm.assert_almost_equal(index, a) - aexp = np.array([0, 1, 2, 3, 4], dtype=np.int64) - bexp = np.array([-1, -1, 1, -1, 2], dtype=np.int64) + aexp = np.array([0, 1, 2, 3, 4], dtype=np.intp) + bexp = np.array([-1, -1, 1, -1, 2], dtype=np.intp) tm.assert_almost_equal(ares, aexp) tm.assert_almost_equal(bres, bexp) @@ -338,8 +338,8 @@ def test_left_join_indexer(): index, ares, bres = libjoin.left_join_indexer(a, b) tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64)) - tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64)) - tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64)) + tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.intp)) + tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.intp)) def test_left_join_indexer2(): @@ -351,10 +351,10 @@ def test_left_join_indexer2(): exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64) tm.assert_almost_equal(res, exp_res) - exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64) + exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) tm.assert_almost_equal(lidx, exp_lidx) - exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64) + exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) tm.assert_almost_equal(ridx, exp_ridx) @@ -367,10 +367,10 @@ def test_outer_join_indexer2(): exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64) tm.assert_almost_equal(res, exp_res) - exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64) + exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) tm.assert_almost_equal(lidx, exp_lidx) - exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64) + exp_ridx = np.array([0, 1, 2, 3, -1, -1], 
dtype=np.intp) tm.assert_almost_equal(ridx, exp_ridx) @@ -383,8 +383,8 @@ def test_inner_join_indexer2(): exp_res = np.array([1, 1, 2, 5], dtype=np.int64) tm.assert_almost_equal(res, exp_res) - exp_lidx = np.array([0, 0, 1, 2], dtype=np.int64) + exp_lidx = np.array([0, 0, 1, 2], dtype=np.intp) tm.assert_almost_equal(lidx, exp_lidx) - exp_ridx = np.array([0, 1, 2, 3], dtype=np.int64) + exp_ridx = np.array([0, 1, 2, 3], dtype=np.intp) tm.assert_almost_equal(ridx, exp_ridx)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40601
2021-03-23T23:26:23Z
2021-03-24T22:23:34Z
2021-03-24T22:23:34Z
2021-03-24T23:00:15Z
REF: deduplicate group cummin/max
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 9766b82b1e9d5..545d6a10232ab 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -1249,26 +1249,30 @@ def group_min(groupby_t[:, ::1] out, @cython.boundscheck(False) @cython.wraparound(False) -def group_cummin(groupby_t[:, ::1] out, - ndarray[groupby_t, ndim=2] values, - const int64_t[:] labels, - int ngroups, - bint is_datetimelike): +def group_cummin_max(groupby_t[:, ::1] out, + ndarray[groupby_t, ndim=2] values, + const int64_t[:] labels, + int ngroups, + bint is_datetimelike, + bint compute_max): """ - Cumulative minimum of columns of `values`, in row groups `labels`. + Cumulative minimum/maximum of columns of `values`, in row groups `labels`. Parameters ---------- out : array - Array to store cummin in. + Array to store cummin/max in. values : array - Values to take cummin of. + Values to take cummin/max of. labels : int64 array Labels to group by. ngroups : int Number of groups, larger than all entries of `labels`. is_datetimelike : bool True if `values` contains datetime-like entries. 
+ compute_max : bool + True if cumulative maximum should be computed, False + if cumulative minimum should be computed Notes ----- @@ -1283,11 +1287,11 @@ def group_cummin(groupby_t[:, ::1] out, N, K = (<object>values).shape accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype) if groupby_t is int64_t: - accum[:] = _int64_max + accum[:] = -_int64_max if compute_max else _int64_max elif groupby_t is uint64_t: - accum[:] = np.iinfo(np.uint64).max + accum[:] = 0 if compute_max else np.iinfo(np.uint64).max else: - accum[:] = np.inf + accum[:] = -np.inf if compute_max else np.inf with nogil: for i in range(N): @@ -1302,66 +1306,32 @@ def group_cummin(groupby_t[:, ::1] out, out[i, j] = val else: mval = accum[lab, j] - if val < mval: - accum[lab, j] = mval = val + if compute_max: + if val > mval: + accum[lab, j] = mval = val + else: + if val < mval: + accum[lab, j] = mval = val out[i, j] = mval @cython.boundscheck(False) @cython.wraparound(False) -def group_cummax(groupby_t[:, ::1] out, +def group_cummin(groupby_t[:, ::1] out, ndarray[groupby_t, ndim=2] values, const int64_t[:] labels, int ngroups, bint is_datetimelike): - """ - Cumulative maximum of columns of `values`, in row groups `labels`. + """See group_cummin_max.__doc__""" + group_cummin_max(out, values, labels, ngroups, is_datetimelike, compute_max=False) - Parameters - ---------- - out : array - Array to store cummax in. - values : array - Values to take cummax of. - labels : int64 array - Labels to group by. - ngroups : int - Number of groups, larger than all entries of `labels`. - is_datetimelike : bool - True if `values` contains datetime-like entries. - Notes - ----- - This method modifies the `out` parameter, rather than returning an object. 
- """ - cdef: - Py_ssize_t i, j, N, K, size - groupby_t val, mval - ndarray[groupby_t, ndim=2] accum - int64_t lab - - N, K = (<object>values).shape - accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype) - if groupby_t is int64_t: - accum[:] = -_int64_max - elif groupby_t is uint64_t: - accum[:] = 0 - else: - accum[:] = -np.inf - - with nogil: - for i in range(N): - lab = labels[i] - - if lab < 0: - continue - for j in range(K): - val = values[i, j] - - if _treat_as_na(val, is_datetimelike): - out[i, j] = val - else: - mval = accum[lab, j] - if val > mval: - accum[lab, j] = mval = val - out[i, j] = mval +@cython.boundscheck(False) +@cython.wraparound(False) +def group_cummax(groupby_t[:, ::1] out, + ndarray[groupby_t, ndim=2] values, + const int64_t[:] labels, + int ngroups, + bint is_datetimelike): + """See group_cummin_max.__doc__""" + group_cummin_max(out, values, labels, ngroups, is_datetimelike, compute_max=True)
After #40584, may as well do this one too.
https://api.github.com/repos/pandas-dev/pandas/pulls/40599
2021-03-23T21:45:48Z
2021-03-24T23:37:42Z
2021-03-24T23:37:42Z
2021-03-25T00:04:15Z
DOC: update guideline for installation development environment using pip
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 432584f0da746..b4fa6b008be74 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -325,7 +325,11 @@ Creating a Python environment (pip) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you aren't using conda for your development environment, follow these instructions. -You'll need to have at least Python 3.6.1 installed on your system. +You'll need to have at least Python 3.7.0 installed on your system. If your Python version +is 3.8.0 (or later), you might need to update your ``setuptools`` to version 42.0.0 (or later) +in your development environment before installing the build dependencies:: + + pip install --upgrade setuptools **Unix**/**macOS with virtualenv**
- [x] closes #40595 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40598
2021-03-23T21:26:51Z
2021-03-24T23:13:35Z
2021-03-24T23:13:35Z
2021-03-24T23:13:39Z
ENH: add `decimal` and `thousands` args to `Styler.format()`
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index f51f81d7c3504..8bd4e0e50ced8 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -102,6 +102,16 @@ class Styler(StylerRenderer): .. versionadded:: 1.2.0 + decimal : str, default "." + Character used as decimal separator for floats, complex and integers + + .. versionadded:: 1.3.0 + + thousands : str, optional, default None + Character used as thousands separator for floats, complex and integers + + .. versionadded:: 1.3.0 + escape : bool, default False Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in cell display strings with HTML-safe sequences. @@ -160,6 +170,8 @@ def __init__( cell_ids: bool = True, na_rep: str | None = None, uuid_len: int = 5, + decimal: str = ".", + thousands: str | None = None, escape: bool = False, ): super().__init__( @@ -175,7 +187,14 @@ def __init__( # validate ordered args self.precision = precision # can be removed on set_precision depr cycle self.na_rep = na_rep # can be removed on set_na_rep depr cycle - self.format(formatter=None, precision=precision, na_rep=na_rep, escape=escape) + self.format( + formatter=None, + precision=precision, + na_rep=na_rep, + escape=escape, + decimal=decimal, + thousands=thousands, + ) def _repr_html_(self) -> str: """ diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index 45b1d97b9694c..15557c993eab4 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -38,7 +38,7 @@ import pandas.core.common as com jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.") -from markupsafe import escape as escape_func # markupsafe is jinja2 dependency +from markupsafe import escape as escape_html # markupsafe is jinja2 dependency BaseFormatter = Union[str, Callable] ExtFormatter = Union[BaseFormatter, Dict[Any, Optional[BaseFormatter]]] @@ -366,6 +366,8 @@ def format( subset: slice | Sequence[Any] | None = 
None, na_rep: str | None = None, precision: int | None = None, + decimal: str = ".", + thousands: str | None = None, escape: bool = False, ) -> StylerRenderer: """ @@ -390,6 +392,16 @@ def format( .. versionadded:: 1.3.0 + decimal : str, default "." + Character used as decimal separator for floats, complex and integers + + .. versionadded:: 1.3.0 + + thousands : str, optional, default None + Character used as thousands separator for floats, complex and integers + + .. versionadded:: 1.3.0 + escape : bool, default False Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in cell display string with HTML-safe sequences. Escaping is done before ``formatter``. @@ -482,6 +494,8 @@ def format( formatter is None, subset is None, precision is None, + decimal == ".", + thousands is None, na_rep is None, escape is False, ) @@ -502,8 +516,14 @@ def format( format_func = formatter[col] except KeyError: format_func = None + format_func = _maybe_wrap_formatter( - format_func, na_rep=na_rep, precision=precision, escape=escape + format_func, + na_rep=na_rep, + precision=precision, + decimal=decimal, + thousands=thousands, + escape=escape, ) for row, value in data[[col]].itertuples(): @@ -607,7 +627,7 @@ def _format_table_styles(styles: CSSStyles) -> CSSStyles: ] -def _default_formatter(x: Any, precision: int) -> Any: +def _default_formatter(x: Any, precision: int, thousands: bool = False) -> Any: """ Format the display of a value @@ -617,14 +637,54 @@ def _default_formatter(x: Any, precision: int) -> Any: Input variable to be formatted precision : Int Floating point precision used if ``x`` is float or complex. + thousands : bool, default False + Whether to group digits with thousands separated with ",". Returns ------- value : Any - Matches input type, or string if input is float or complex. + Matches input type, or string if input is float or complex or int with sep. 
""" if isinstance(x, (float, complex)): + if thousands: + return f"{x:,.{precision}f}" return f"{x:.{precision}f}" + elif isinstance(x, int) and thousands: + return f"{x:,.0f}" + return x + + +def _wrap_decimal_thousands( + formatter: Callable, decimal: str, thousands: str | None +) -> Callable: + """ + Takes a string formatting function and wraps logic to deal with thousands and + decimal parameters, in the case that they are non-standard and that the input + is a (float, complex, int). + """ + + def wrapper(x): + if isinstance(x, (float, complex, int)): + if decimal != "." and thousands is not None and thousands != ",": + return ( + formatter(x) + .replace(",", "§_§-") # rare string to avoid "," <-> "." clash. + .replace(".", decimal) + .replace("§_§-", thousands) + ) + elif decimal != "." and (thousands is None or thousands == ","): + return formatter(x).replace(".", decimal) + elif decimal == "." and thousands is not None and thousands != ",": + return formatter(x).replace(",", thousands) + return formatter(x) + + return wrapper + + +def _str_escape_html(x): + """if escaping html: only use on str, else return input""" + if isinstance(x, str): + return escape_html(x) return x @@ -632,6 +692,8 @@ def _maybe_wrap_formatter( formatter: BaseFormatter | None = None, na_rep: str | None = None, precision: int | None = None, + decimal: str = ".", + thousands: str | None = None, escape: bool = False, ) -> Callable: """ @@ -639,29 +701,36 @@ def _maybe_wrap_formatter( a default formatting function. wraps with na_rep, and precision where they are available. 
""" + # Get initial func from input string, input callable, or from default factory if isinstance(formatter, str): - formatter_func = lambda x: formatter.format(x) + func_0 = lambda x: formatter.format(x) elif callable(formatter): - formatter_func = formatter + func_0 = formatter elif formatter is None: precision = get_option("display.precision") if precision is None else precision - formatter_func = partial(_default_formatter, precision=precision) + func_0 = partial( + _default_formatter, precision=precision, thousands=(thousands is not None) + ) else: raise TypeError(f"'formatter' expected str or callable, got {type(formatter)}") - def _str_escape(x, escape: bool): - """if escaping: only use on str, else return input""" - if escape and isinstance(x, str): - return escape_func(x) - else: - return x + # Replace HTML chars if escaping + if escape: + func_1 = lambda x: func_0(_str_escape_html(x)) + else: + func_1 = func_0 - display_func = lambda x: formatter_func(partial(_str_escape, escape=escape)(x)) + # Replace decimals and thousands if non-standard inputs detected + if decimal != "." 
or (thousands is not None and thousands != ","): + func_2 = _wrap_decimal_thousands(func_1, decimal=decimal, thousands=thousands) + else: + func_2 = func_1 + # Replace missing values if na_rep if na_rep is None: - return display_func + return func_2 else: - return lambda x: na_rep if isna(x) else display_func(x) + return lambda x: na_rep if isna(x) else func_2(x) def non_reducing_slice(slice_): diff --git a/pandas/tests/io/formats/style/test_format.py b/pandas/tests/io/formats/style/test_format.py index 09b18e1f71d76..0f3e5863a4a99 100644 --- a/pandas/tests/io/formats/style/test_format.py +++ b/pandas/tests/io/formats/style/test_format.py @@ -197,3 +197,43 @@ def test_format_subset(): assert ctx["body"][1][1]["display_value"] == "1.1" assert ctx["body"][0][2]["display_value"] == "0.123400" assert ctx["body"][1][2]["display_value"] == raw_11 + + +@pytest.mark.parametrize("formatter", [None, "{:,.1f}"]) +@pytest.mark.parametrize("decimal", [".", "*"]) +@pytest.mark.parametrize("precision", [None, 2]) +def test_format_thousands(formatter, decimal, precision): + s = DataFrame([[1000000.123456789]]).style # test float + result = s.format( + thousands="_", formatter=formatter, decimal=decimal, precision=precision + )._translate() + assert "1_000_000" in result["body"][0][1]["display_value"] + + s = DataFrame([[1000000]]).style # test int + result = s.format( + thousands="_", formatter=formatter, decimal=decimal, precision=precision + )._translate() + assert "1_000_000" in result["body"][0][1]["display_value"] + + s = DataFrame([[1 + 1000000.123456789j]]).style # test complex + result = s.format( + thousands="_", formatter=formatter, decimal=decimal, precision=precision + )._translate() + assert "1_000_000" in result["body"][0][1]["display_value"] + + +@pytest.mark.parametrize("formatter", [None, "{:,.4f}"]) +@pytest.mark.parametrize("thousands", [None, ",", "*"]) +@pytest.mark.parametrize("precision", [None, 4]) +def test_format_decimal(formatter, thousands, precision): 
+ s = DataFrame([[1000000.123456789]]).style # test float + result = s.format( + decimal="_", formatter=formatter, thousands=thousands, precision=precision + )._translate() + assert "000_123" in result["body"][0][1]["display_value"] + + s = DataFrame([[1 + 1000000.123456789j]]).style # test complex + result = s.format( + decimal="_", formatter=formatter, thousands=thousands, precision=precision + )._translate() + assert "000_123" in result["body"][0][1]["display_value"]
A PR for Europeans: ![Screen Shot 2021-03-23 at 21 40 19](https://user-images.githubusercontent.com/24256554/112216306-a1b90a00-8c21-11eb-9d2b-c281154b4b46.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/40596
2021-03-23T20:51:09Z
2021-04-20T23:01:03Z
2021-04-20T23:01:03Z
2021-04-21T06:09:13Z
TYP: testing.pyi
diff --git a/pandas/_libs/testing.pyi b/pandas/_libs/testing.pyi new file mode 100644 index 0000000000000..ac0c772780c5c --- /dev/null +++ b/pandas/_libs/testing.pyi @@ -0,0 +1,8 @@ + + +def assert_dict_equal(a, b, compare_keys: bool = ...): ... + +def assert_almost_equal(a, b, + rtol: float = ..., atol: float = ..., + check_dtype: bool = ..., + obj=..., lobj=..., robj=..., index_values=...): ... diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index 2adc70438cce7..62205b9203bf0 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -154,6 +154,9 @@ def assert_almost_equal( else: obj = "Input" assert_class_equal(left, right, obj=obj) + + # if we have "equiv", this becomes True + check_dtype = bool(check_dtype) _testing.assert_almost_equal( left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs ) @@ -388,12 +391,15 @@ def _get_ilevel_values(index, level): msg = f"{obj} values are different ({np.round(diff, 5)} %)" raise_assert_detail(obj, msg, left, right) else: + + # if we have "equiv", this becomes True + exact_bool = bool(exact) _testing.assert_almost_equal( left.values, right.values, rtol=rtol, atol=atol, - check_dtype=exact, + check_dtype=exact_bool, obj=obj, lobj=left, robj=right,
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40594
2021-03-23T20:09:19Z
2021-03-29T14:55:35Z
2021-03-29T14:55:35Z
2021-03-29T15:02:01Z
TST Add test for loc on sparse dataframes
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 85accac5a8235..e6bc6e5a9ea38 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1163,6 +1163,37 @@ def test_loc_getitem_listlike_all_retains_sparse(self): result = df.loc[[0, 1]] tm.assert_frame_equal(result, df) + @td.skip_if_no_scipy + def test_loc_getitem_sparse_frame(self): + # GH34687 + from scipy.sparse import eye + + df = DataFrame.sparse.from_spmatrix(eye(5)) + result = df.loc[range(2)] + expected = DataFrame( + [[1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0]], + dtype=SparseDtype("float64", 0.0), + ) + tm.assert_frame_equal(result, expected) + + result = df.loc[range(2)].loc[range(1)] + expected = DataFrame( + [[1.0, 0.0, 0.0, 0.0, 0.0]], dtype=SparseDtype("float64", 0.0) + ) + tm.assert_frame_equal(result, expected) + + def test_loc_getitem_sparse_series(self): + # GH34687 + s = Series([1.0, 0.0, 0.0, 0.0, 0.0], dtype=SparseDtype("float64", 0.0)) + + result = s.loc[range(2)] + expected = Series([1.0, 0.0], dtype=SparseDtype("float64", 0.0)) + tm.assert_series_equal(result, expected) + + result = s.loc[range(3)].loc[range(2)] + expected = Series([1.0, 0.0], dtype=SparseDtype("float64", 0.0)) + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("key_type", [iter, np.array, Series, Index]) def test_loc_getitem_iterable(self, float_frame, key_type): idx = key_type(["A", "B", "C"])
Adds a test for loc on sparse dataframes. - [x] closes #34687 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/40593
2021-03-23T20:01:24Z
2021-04-02T17:15:22Z
2021-04-02T17:15:22Z
2021-04-02T17:15:27Z
REGR: where not copying on no-op
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index c7bc337239faf..45d131327630e 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -17,6 +17,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.sum` when ``min_count`` greater than the :class:`DataFrame` shape was passed resulted in a ``ValueError`` (:issue:`39738`) - Fixed regression in :meth:`DataFrame.to_json` raising ``AttributeError`` when run on PyPy (:issue:`39837`) +- Fixed regression in :meth:`DataFrame.where` not returning a copy in the case of an all True condition (:issue:`39595`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c13eb3f109354..79cf31c5caaac 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1327,7 +1327,8 @@ def where(self, other, cond, errors="raise") -> List[Block]: if noop: # TODO: avoid the downcasting at the end in this case? 
- result = values + # GH-39595: Always return a copy + result = values.copy() else: # see if we can operate on the entire block, or need item-by-item # or if we are a single block (ndim == 1) diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index bc84d7c70b01c..574fa46d10f67 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -692,3 +692,20 @@ def test_where_try_cast_deprecated(frame_or_series): with tm.assert_produces_warning(FutureWarning): # try_cast keyword deprecated obj.where(mask, -1, try_cast=False) + + +def test_where_copies_with_noop(frame_or_series): + # GH-39595 + result = frame_or_series([1, 2, 3, 4]) + expected = result.copy() + col = result[0] if frame_or_series is DataFrame else result + + where_res = result.where(col < 5) + where_res *= 2 + + tm.assert_equal(result, expected) + + where_res = result.where(col > 5, [1, 2, 3, 4]) + where_res *= 2 + + tm.assert_equal(result, expected)
- [x] closes #39595 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40592
2021-03-23T16:14:13Z
2021-03-23T20:30:31Z
2021-03-23T20:30:31Z
2021-03-25T13:40:11Z
Backport PR #40525 on branch 1.2.x (BUG: to_json failing on PyPy)
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index edf23bf89d7e1..c7bc337239faf 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -16,6 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :meth:`DataFrame.sum` when ``min_count`` greater than the :class:`DataFrame` shape was passed resulted in a ``ValueError`` (:issue:`39738`) +- Fixed regression in :meth:`DataFrame.to_json` raising ``AttributeError`` when run on PyPy (:issue:`39837`) - .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 59298522d86d1..5a3cccdbfea7e 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -272,18 +272,6 @@ static PyObject *get_sub_attr(PyObject *obj, char *attr, char *subAttr) { return ret; } -static int is_simple_frame(PyObject *obj) { - PyObject *check = get_sub_attr(obj, "_mgr", "is_mixed_type"); - int ret = (check == Py_False); - - if (!check) { - return 0; - } - - Py_DECREF(check); - return ret; -} - static Py_ssize_t get_attr_length(PyObject *obj, char *attr) { PyObject *tmp = PyObject_GetAttrString(obj, attr); Py_ssize_t ret; @@ -301,6 +289,17 @@ static Py_ssize_t get_attr_length(PyObject *obj, char *attr) { return ret; } +static int is_simple_frame(PyObject *obj) { + PyObject *mgr = PyObject_GetAttrString(obj, "_mgr"); + if (!mgr) { + return 0; + } + int ret = (get_attr_length(mgr, "blocks") <= 1); + + Py_DECREF(mgr); + return ret; +} + static npy_int64 get_long_attr(PyObject *o, const char *attr) { npy_int64 long_val; PyObject *value = PyObject_GetAttrString(o, attr);
Backport PR #40525: BUG: to_json failing on PyPy
https://api.github.com/repos/pandas-dev/pandas/pulls/40590
2021-03-23T15:19:33Z
2021-03-25T12:49:32Z
2021-03-25T12:49:32Z
2021-03-25T12:49:33Z
PERF: Define Block.__init__ in cython
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 5352ca53e1b54..31b6935e9b2ba 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -455,3 +455,53 @@ def get_blkno_placements(blknos, group: bool = True): for blkno, indexer in get_blkno_indexers(blknos, group): yield blkno, BlockPlacement(indexer) + + +@cython.freelist(64) +cdef class Block: + """ + Defining __init__ in a cython class significantly improves performance. + """ + cdef: + public BlockPlacement _mgr_locs + readonly int ndim + public object values + + def __cinit__(self, values, placement: BlockPlacement, ndim: int): + """ + Parameters + ---------- + values : np.ndarray or ExtensionArray + We assume maybe_coerce_values has already been called. + placement : BlockPlacement + ndim : int + 1 for SingleBlockManager/Series, 2 for BlockManager/DataFrame + """ + self._mgr_locs = placement + self.ndim = ndim + self.values = values + + cpdef __reduce__(self): + # We have to do some gymnastics b/c "ndim" is keyword-only + from functools import partial + + from pandas.core.internals.blocks import new_block + + args = (self.values, self.mgr_locs.indexer) + func = partial(new_block, ndim=self.ndim) + return func, args + + cpdef __setstate__(self, state): + from pandas.core.construction import extract_array + + self.mgr_locs = BlockPlacement(state[0]) + self.values = extract_array(state[1], extract_numpy=True) + if len(state) > 2: + # we stored ndim + self.ndim = state[2] + else: + # older pickle + from pandas.core.internals.api import maybe_infer_ndim + + ndim = maybe_infer_ndim(self.values, self.mgr_locs) + self.ndim = ndim diff --git a/pandas/core/internals/api.py b/pandas/core/internals/api.py index aab8273b1e213..d6b76510c68ab 100644 --- a/pandas/core/internals/api.py +++ b/pandas/core/internals/api.py @@ -59,13 +59,13 @@ def make_block( if not isinstance(placement, BlockPlacement): placement = BlockPlacement(placement) - ndim = _maybe_infer_ndim(values, placement, 
ndim) + ndim = maybe_infer_ndim(values, placement, ndim) check_ndim(values, placement, ndim) values = maybe_coerce_values(values) return klass(values, ndim=ndim, placement=placement) -def _maybe_infer_ndim(values, placement: BlockPlacement, ndim: Optional[int]) -> int: +def maybe_infer_ndim(values, placement: BlockPlacement, ndim: Optional[int]) -> int: """ If `ndim` is not provided, infer it from placment and values. """ diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index e4c89f2e63822..4d1006e5ab65b 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -146,7 +146,7 @@ def newfunc(self, *args, **kwargs) -> List[Block]: return cast(F, newfunc) -class Block(PandasObject): +class Block(libinternals.Block, PandasObject): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas data structure @@ -156,7 +156,7 @@ class Block(PandasObject): values: Union[np.ndarray, ExtensionArray] - __slots__ = ["_mgr_locs", "values", "ndim"] + __slots__ = () is_numeric = False is_bool = False is_object = False @@ -164,35 +164,6 @@ class Block(PandasObject): _can_consolidate = True _validate_ndim = True - @classmethod - def _simple_new( - cls, values: ArrayLike, placement: BlockPlacement, ndim: int - ) -> Block: - """ - Fastpath constructor, does *no* validation - """ - obj = object.__new__(cls) - obj.ndim = ndim - obj.values = values - obj._mgr_locs = placement - return obj - - def __init__(self, values, placement: BlockPlacement, ndim: int): - """ - Parameters - ---------- - values : np.ndarray or ExtensionArray - We assume maybe_coerce_values has already been called. 
- placement : BlockPlacement (or castable) - ndim : int - 1 for SingleBlockManager/Series, 2 for BlockManager/DataFrame - """ - assert isinstance(ndim, int) - assert isinstance(placement, BlockPlacement) - self.ndim = ndim - self._mgr_locs = placement - self.values = values - @final @property def _consolidate_key(self): @@ -277,7 +248,6 @@ def mgr_locs(self) -> BlockPlacement: @mgr_locs.setter def mgr_locs(self, new_mgr_locs: BlockPlacement): - assert isinstance(new_mgr_locs, BlockPlacement) self._mgr_locs = new_mgr_locs @final @@ -322,16 +292,6 @@ def __repr__(self) -> str: def __len__(self) -> int: return len(self.values) - @final - def __getstate__(self): - return self.mgr_locs.indexer, self.values - - @final - def __setstate__(self, state): - self.mgr_locs = libinternals.BlockPlacement(state[0]) - self.values = extract_array(state[1], extract_numpy=True) - self.ndim = self.values.ndim - def _slice(self, slicer): """ return a slice of my values """ @@ -352,7 +312,7 @@ def getitem_block(self, slicer) -> Block: if new_values.ndim != self.values.ndim: raise ValueError("Only same dim slicing is allowed") - return type(self)._simple_new(new_values, new_mgr_locs, self.ndim) + return type(self)(new_values, new_mgr_locs, self.ndim) @final def getitem_block_index(self, slicer: slice) -> Block: @@ -364,7 +324,7 @@ def getitem_block_index(self, slicer: slice) -> Block: # error: Invalid index type "Tuple[ellipsis, slice]" for # "Union[ndarray, ExtensionArray]"; expected type "Union[int, slice, ndarray]" new_values = self.values[..., slicer] # type: ignore[index] - return type(self)._simple_new(new_values, self._mgr_locs, ndim=self.ndim) + return type(self)(new_values, self._mgr_locs, ndim=self.ndim) @final def getitem_block_columns(self, slicer, new_mgr_locs: BlockPlacement) -> Block: @@ -378,7 +338,7 @@ def getitem_block_columns(self, slicer, new_mgr_locs: BlockPlacement) -> Block: if new_values.ndim != self.values.ndim: raise ValueError("Only same dim slicing is allowed") 
- return type(self)._simple_new(new_values, new_mgr_locs, self.ndim) + return type(self)(new_values, new_mgr_locs, self.ndim) @property def shape(self) -> Shape: @@ -1911,7 +1871,7 @@ def set_inplace(self, locs, values): self.values[locs] = values -class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): +class DatetimeTZBlock(ExtensionBlock, DatetimeLikeBlockMixin): """ implement a datetime64 block with a tz attribute """ values: DatetimeArray
Shaves about 10% off the benchmark discussed in https://github.com/pandas-dev/pandas/pull/40171#issuecomment-790219422
https://api.github.com/repos/pandas-dev/pandas/pulls/40586
2021-03-23T14:53:20Z
2021-03-23T20:28:33Z
2021-03-23T20:28:33Z
2021-03-23T21:13:58Z
REF: deduplicate group min/max
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 89020f2078584..403720627bc7a 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -1156,18 +1156,40 @@ ctypedef fused groupby_t: @cython.wraparound(False) @cython.boundscheck(False) -def group_max(groupby_t[:, ::1] out, - int64_t[::1] counts, - ndarray[groupby_t, ndim=2] values, - const int64_t[:] labels, - Py_ssize_t min_count=-1): +cdef group_min_max(groupby_t[:, ::1] out, + int64_t[::1] counts, + ndarray[groupby_t, ndim=2] values, + const int64_t[:] labels, + Py_ssize_t min_count=-1, + bint compute_max=True): """ - Only aggregates on axis=0 + Compute minimum/maximum of columns of `values`, in row groups `labels`. + + Parameters + ---------- + out : array + Array to store result in. + counts : int64 array + Input as a zeroed array, populated by group sizes during algorithm + values : array + Values to find column-wise min/max of. + labels : int64 array + Labels to group by. + min_count : Py_ssize_t, default -1 + The minimum number of non-NA group elements, NA result if threshold + is not met + compute_max : bint, default True + True to compute group-wise max, False to compute min + + Notes + ----- + This method modifies the `out` parameter, rather than returning an object. 
+ `counts` is modified to hold group sizes """ cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - groupby_t val, count, nan_val - ndarray[groupby_t, ndim=2] maxx + Py_ssize_t i, j, N, K, lab, ngroups = len(counts) + groupby_t val, nan_val + ndarray[groupby_t, ndim=2] group_min_or_max bint runtime_error = False int64_t[:, ::1] nobs @@ -1179,18 +1201,17 @@ def group_max(groupby_t[:, ::1] out, min_count = max(min_count, 1) nobs = np.zeros((<object>out).shape, dtype=np.int64) - maxx = np.empty_like(out) + group_min_or_max = np.empty_like(out) if groupby_t is int64_t: - # Note: evaluated at compile-time - maxx[:] = -_int64_max + group_min_or_max[:] = -_int64_max if compute_max else _int64_max nan_val = NPY_NAT elif groupby_t is uint64_t: # NB: We do not define nan_val because there is no such thing - # for uint64_t. We carefully avoid having to reference it in this - # case. - maxx[:] = 0 + # for uint64_t. We carefully avoid having to reference it in this + # case. + group_min_or_max[:] = 0 if compute_max else np.iinfo(np.uint64).max else: - maxx[:] = -np.inf + group_min_or_max[:] = -np.inf if compute_max else np.inf nan_val = NAN N, K = (<object>values).shape @@ -1208,20 +1229,23 @@ def group_max(groupby_t[:, ::1] out, if not _treat_as_na(val, True): # TODO: Sure we always want is_datetimelike=True? 
nobs[lab, j] += 1 - if val > maxx[lab, j]: - maxx[lab, j] = val + if compute_max: + if val > group_min_or_max[lab, j]: + group_min_or_max[lab, j] = val + else: + if val < group_min_or_max[lab, j]: + group_min_or_max[lab, j] = val - for i in range(ncounts): + for i in range(ngroups): for j in range(K): if nobs[i, j] < min_count: if groupby_t is uint64_t: runtime_error = True break else: - out[i, j] = nan_val else: - out[i, j] = maxx[i, j] + out[i, j] = group_min_or_max[i, j] if runtime_error: # We cannot raise directly above because that is within a nogil @@ -1231,75 +1255,24 @@ def group_max(groupby_t[:, ::1] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_min(groupby_t[:, ::1] out, +def group_max(groupby_t[:, ::1] out, int64_t[::1] counts, ndarray[groupby_t, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=-1): - """ - Only aggregates on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - groupby_t val, count, nan_val - ndarray[groupby_t, ndim=2] minx - bint runtime_error = False - int64_t[:, ::1] nobs - - # TODO(cython 3.0): - # Instead of `labels.shape[0]` use `len(labels)` - if not len(values) == labels.shape[0]: - raise AssertionError("len(index) != len(labels)") - - min_count = max(min_count, 1) - nobs = np.zeros((<object>out).shape, dtype=np.int64) - - minx = np.empty_like(out) - if groupby_t is int64_t: - minx[:] = _int64_max - nan_val = NPY_NAT - elif groupby_t is uint64_t: - # NB: We do not define nan_val because there is no such thing - # for uint64_t. We carefully avoid having to reference it in this - # case. 
- minx[:] = np.iinfo(np.uint64).max - else: - minx[:] = np.inf - nan_val = NAN + """See group_min_max.__doc__""" + group_min_max(out, counts, values, labels, min_count=min_count, compute_max=True) - N, K = (<object>values).shape - with nogil: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - if not _treat_as_na(val, True): - # TODO: Sure we always want is_datetimelike=True? - nobs[lab, j] += 1 - if val < minx[lab, j]: - minx[lab, j] = val - - for i in range(ncounts): - for j in range(K): - if nobs[i, j] < min_count: - if groupby_t is uint64_t: - runtime_error = True - break - else: - out[i, j] = nan_val - else: - out[i, j] = minx[i, j] - - if runtime_error: - # We cannot raise directly above because that is within a nogil - # block. - raise RuntimeError("empty group with uint64_t") +@cython.wraparound(False) +@cython.boundscheck(False) +def group_min(groupby_t[:, ::1] out, + int64_t[::1] counts, + ndarray[groupby_t, ndim=2] values, + const int64_t[:] labels, + Py_ssize_t min_count=-1): + """See group_min_max.__doc__""" + group_min_max(out, counts, values, labels, min_count=min_count, compute_max=False) @cython.boundscheck(False)
ASVs look unaffected: <details> ``` before after ratio [05a0e98a] [16d06e22] <master> <ref/deduplicate_grp_min_max> 33.0±3ms 33.3±1ms 1.01 gil.ParallelGroupbyMethods.time_loop(2, 'max') 32.2±0.4ms 34.5±3ms 1.07 gil.ParallelGroupbyMethods.time_loop(2, 'min') 64.1±1ms 65.6±0.5ms 1.02 gil.ParallelGroupbyMethods.time_loop(4, 'max') 65.3±3ms 65.5±1ms 1.00 gil.ParallelGroupbyMethods.time_loop(4, 'min') 20.2±2ms 20.2±0.8ms 1.00 gil.ParallelGroupbyMethods.time_parallel(2, 'max') 21.3±1ms 20.1±0.8ms 0.95 gil.ParallelGroupbyMethods.time_parallel(2, 'min') 25.1±0.7ms 25.3±0.5ms 1.01 gil.ParallelGroupbyMethods.time_parallel(4, 'max') 25.0±0.4ms 26.5±2ms 1.06 gil.ParallelGroupbyMethods.time_parallel(4, 'min') 125±50ms 104±1ms ~0.83 groupby.GroupByCythonAgg.time_frame_agg('float64', 'max') 105±2ms 107±2ms 1.02 groupby.GroupByCythonAgg.time_frame_agg('float64', 'min') ``` </details>
https://api.github.com/repos/pandas-dev/pandas/pulls/40584
2021-03-23T14:48:05Z
2021-03-23T20:29:08Z
2021-03-23T20:29:08Z
2021-03-23T20:31:10Z
COMPAT: add back dummy CategoricalBlock class
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index 18e584575bc97..f0018928255e6 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -26,6 +26,7 @@ __all__ = [ "Block", + "CategoricalBlock", "NumericBlock", "DatetimeBlock", "DatetimeTZBlock", @@ -56,6 +57,8 @@ def __getattr__(name: str): DeprecationWarning, stacklevel=2, ) - return ExtensionBlock + from pandas.core.internals.blocks import CategoricalBlock + + return CategoricalBlock raise AttributeError(f"module 'pandas.core.internals' has no attribute '{name}'") diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c13eb3f109354..0c92a2f8515fd 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1244,7 +1244,7 @@ def take_nd( Take values according to indexer and return them as a block.bb """ - # algos.take_nd dispatches for DatetimeTZBlock + # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock # so need to preserve types # sparse is treated like an ndarray, but needs .get_values() shaping @@ -1443,7 +1443,7 @@ class ExtensionBlock(Block): Notes ----- This holds all 3rd-party extension array types. It's also the immediate - parent class for our internal extension types' blocks. + parent class for our internal extension types' blocks, CategoricalBlock. ExtensionArrays are limited to 1-D. 
""" @@ -2017,6 +2017,11 @@ def _can_hold_element(self, element: Any) -> bool: return True +class CategoricalBlock(ExtensionBlock): + # this Block type is kept for backwards-compatibility + __slots__ = () + + # ----------------------------------------------------------------- # Constructor Helpers @@ -2078,7 +2083,7 @@ def get_block_type(values, dtype: Optional[Dtype] = None): # Need this first(ish) so that Sparse[datetime] is sparse cls = ExtensionBlock elif isinstance(dtype, CategoricalDtype): - cls = ExtensionBlock + cls = CategoricalBlock elif vtype is Timestamp: cls = DatetimeTZBlock elif vtype is Interval or vtype is Period: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index ef2925874c0ac..759500827f344 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -67,6 +67,7 @@ ) from pandas.core.internals.blocks import ( Block, + CategoricalBlock, DatetimeTZBlock, ExtensionBlock, ObjectValuesExtensionBlock, @@ -1860,6 +1861,13 @@ def _form_blocks( object_blocks = _simple_blockify(items_dict["ObjectBlock"], np.object_) blocks.extend(object_blocks) + if len(items_dict["CategoricalBlock"]) > 0: + cat_blocks = [ + new_block(array, klass=CategoricalBlock, placement=i, ndim=2) + for i, array in items_dict["CategoricalBlock"] + ] + blocks.extend(cat_blocks) + if len(items_dict["ExtensionBlock"]): external_blocks = [ new_block(array, klass=ExtensionBlock, placement=i, ndim=2)
See https://github.com/pandas-dev/pandas/issues/40226#issuecomment-804903010 @jbrockmendel this is unfortunate, but short term I think we need to keep the CategoricalBlock class. I am not directly sure what would be the best way to actually deprecate this.
https://api.github.com/repos/pandas-dev/pandas/pulls/40582
2021-03-23T13:59:18Z
2021-03-23T16:08:02Z
2021-03-23T16:08:02Z
2021-03-23T16:29:28Z
COMPAT: make Categorical._codes settable again
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 1398db6960cc8..0c7ee2d43753e 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1756,6 +1756,10 @@ def to_dense(self): def _codes(self) -> np.ndarray: return self._ndarray + @_codes.setter + def _codes(self, value: np.ndarray): + self._ndarray = value + def _from_backing_data(self, arr: np.ndarray) -> Categorical: assert isinstance(arr, np.ndarray) assert arr.dtype == self._ndarray.dtype
This is a follow-up on https://github.com/pandas-dev/pandas/pull/40033, which changes the `_code` attribute of a Categorical into a property without a setter. This broke dask / fastparquet, so this PR is making it settable again. We could deprecate the setter, though, at the same time.
https://api.github.com/repos/pandas-dev/pandas/pulls/40580
2021-03-23T12:56:54Z
2021-03-24T07:28:47Z
2021-03-24T07:28:47Z
2021-03-24T07:31:08Z
CLN: remove unused kwarg from IntervalIndex._searchsorted_monotonic
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 947c43f5f8001..e5e7b446d9cb2 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -5678,7 +5678,7 @@ def _maybe_cast_slice_bound(self, label, side: str_t, kind): return label - def _searchsorted_monotonic(self, label, side="left"): + def _searchsorted_monotonic(self, label, side: str_t = "left"): if self.is_monotonic_increasing: return self.searchsorted(label, side=side) elif self.is_monotonic_decreasing: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index f61c618fa1a07..1b286f258d72c 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -598,7 +598,7 @@ def _maybe_convert_i8(self, key): return key_i8 - def _searchsorted_monotonic(self, label, side, exclude_label=False): + def _searchsorted_monotonic(self, label, side: str = "left"): if not self.is_non_overlapping_monotonic: raise KeyError( "can only get slices from an IntervalIndex if bounds are " @@ -615,11 +615,11 @@ def _searchsorted_monotonic(self, label, side, exclude_label=False): side == "right" and not self.left.is_monotonic_increasing ): sub_idx = self.right - if self.open_right or exclude_label: + if self.open_right: label = _get_next_label(label) else: sub_idx = self.left - if self.open_left or exclude_label: + if self.open_left: label = _get_prev_label(label) return sub_idx._searchsorted_monotonic(label, side)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40578
2021-03-23T03:36:09Z
2021-03-23T15:16:20Z
2021-03-23T15:16:20Z
2021-03-23T15:20:34Z
TST/CLN: remove redundant to_json test
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index c7aa56cae20be..3bd78d44a0b04 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1730,11 +1730,6 @@ def test_to_s3(self, s3_resource, s3so): timeout -= 0.1 assert timeout > 0, "Timed out waiting for file to appear on moto" - def test_json_pandas_na(self): - # GH 31615 - result = DataFrame([[pd.NA]]).to_json() - assert result == '{"0":{"0":null}}' - def test_json_pandas_nulls(self, nulls_fixture, request): # GH 31615 if isinstance(nulls_fixture, Decimal):
Should be covered by the test which follows it since `pd.NA` is in `nulls_fixture`
https://api.github.com/repos/pandas-dev/pandas/pulls/40577
2021-03-22T23:51:40Z
2021-03-23T01:24:12Z
2021-03-23T01:24:12Z
2021-03-23T01:27:46Z
BUG: Series.iteritems should be lazy
diff --git a/doc/source/release.rst b/doc/source/release.rst index 3f3e3e87133a0..79daf09083fc1 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -151,7 +151,7 @@ API Changes - ``DataFrame.sort`` now places NaNs at the beginning or end of the sort according to the ``na_position`` parameter. (:issue:`3917`) - all offset operations now return ``Timestamp`` types (rather than datetime), Business/Week frequencies were incorrect (:issue:`4069`) - +- ``Series.iteritems()`` is now lazy (returns an iterator rather than a list). This was the documented behavior prior to 0.14. (:issue:`6760`) Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 057f83bff44f2..8ecb6b24083bf 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -196,6 +196,8 @@ API changes covs = rolling_cov(df[['A','B','C']], df[['B','C','D']], 5, pairwise=True) covs[df.index[-1]] +- ``Series.iteritems()`` is now lazy (returns an iterator rather than a list). This was the documented behavior prior to 0.14. 
(:issue:`6760`) + MultiIndexing Using Slicers ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/core/series.py b/pandas/core/series.py index 47721ab371c3b..4ab7855ec2f84 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -959,7 +959,7 @@ def iteritems(self): """ Lazily iterate over (index, value) tuples """ - return lzip(iter(self.index), iter(self)) + return zip(iter(self.index), iter(self)) if compat.PY3: # pragma: no cover items = iteritems diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 95b7b6ace4e2d..3336c3948fac6 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1808,6 +1808,9 @@ def test_iteritems(self): for idx, val in compat.iteritems(self.ts): self.assertEqual(val, self.ts[idx]) + # assert is lazy (genrators don't define __getslice__, lists do) + self.assertFalse(hasattr(self.series.iteritems(), '__getslice__')) + def test_sum(self): self._check_stat_op('sum', np.sum)
Closes #6760
https://api.github.com/repos/pandas-dev/pandas/pulls/6761
2014-04-01T19:07:10Z
2014-04-01T20:36:23Z
2014-04-01T20:36:23Z
2014-06-30T14:03:50Z
DOC: documented that .apply(func) executes func twice on the first time
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index cc5ebc730f94a..7412d25b33125 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -663,6 +663,25 @@ The dimension of the returned result can also change: s s.apply(f) + +.. warning:: + + In the current implementation apply calls func twice on the + first group to decide whether it can take a fast or slow code + path. This can lead to unexpected behavior if func has + side-effects, as they will take effect twice for the first + group. + + .. ipython:: python + + d = DataFrame({"a":["x", "y"], "b":[1,2]}) + def identity(df): + print df + return df + + d.groupby("a").apply(identity) + + Other useful features --------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5ecdd4d8b351d..3329483a61f5c 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3302,6 +3302,14 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, array/series Additional keyword arguments will be passed as keywords to the function + Notes + ----- + In the current implementation apply calls func twice on the + first column/row to decide whether it can take a fast or slow + code path. This can lead to unexpected behavior if func has + side-effects, as they will take effect twice for the first + column/row. + Examples -------- >>> df.apply(numpy.sqrt) # returns DataFrame diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 996a691eca082..8fd49bd2fe5bd 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -547,7 +547,14 @@ def apply(self, func, *args, **kwargs): Notes ----- - See online documentation for full exposition on how to use apply + See online documentation for full exposition on how to use apply. + + In the current implementation apply calls func twice on the + first group to decide whether it can take a fast or slow code + path. 
This can lead to unexpected behavior if func has + side-effects, as they will take effect twice for the first + group. + See also --------
Related issues are #2656, #2936 and #6753.
https://api.github.com/repos/pandas-dev/pandas/pulls/6756
2014-04-01T12:50:45Z
2014-04-01T12:52:59Z
2014-04-01T12:52:59Z
2020-05-14T09:34:31Z
PERF: perf improvements in timedelta conversions from integer dtypes
diff --git a/doc/source/release.rst b/doc/source/release.rst index 3f3e3e87133a0..cd5b0cbd23353 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -208,6 +208,7 @@ Improvements to existing features - Performance improvement when converting ``DatetimeIndex`` to floating ordinals using ``DatetimeConverter`` (:issue:`6636`) - Performance improvement for ``DataFrame.shift`` (:issue:`5609`) +- Performance improvements in timedelta conversions for integer dtypes (:issue:`6754`) .. _release.bug_fixes-0.14.0: diff --git a/pandas/core/common.py b/pandas/core/common.py index b33ee6d66f901..84d22a31531f8 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -2130,6 +2130,16 @@ def is_timedelta64_dtype(arr_or_dtype): return issubclass(tipo, np.timedelta64) +def is_timedelta64_ns_dtype(arr_or_dtype): + if isinstance(arr_or_dtype, np.dtype): + tipo = arr_or_dtype.type + elif isinstance(arr_or_dtype, type): + tipo = np.dtype(arr_or_dtype).type + else: + tipo = arr_or_dtype.dtype.type + return tipo == _TD_DTYPE + + def needs_i8_conversion(arr_or_dtype): return (is_datetime64_dtype(arr_or_dtype) or is_timedelta64_dtype(arr_or_dtype)) diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index c490aee134a1a..341feec67fb9b 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -173,6 +173,36 @@ def conv(v): expected = np.timedelta64(timedelta(seconds=1)) self.assertEqual(result, expected) + # arrays of various dtypes + arr = np.array([1]*5,dtype='int64') + result = to_timedelta(arr,unit='s') + expected = Series([ np.timedelta64(1,'s') ]*5) + tm.assert_series_equal(result, expected) + + arr = np.array([1]*5,dtype='int64') + result = to_timedelta(arr,unit='m') + expected = Series([ np.timedelta64(1,'m') ]*5) + tm.assert_series_equal(result, expected) + + arr = np.array([1]*5,dtype='int64') + result = to_timedelta(arr,unit='h') + expected = Series([ np.timedelta64(1,'h') 
]*5) + tm.assert_series_equal(result, expected) + + arr = np.array([1]*5,dtype='timedelta64[s]') + result = to_timedelta(arr) + expected = Series([ np.timedelta64(1,'s') ]*5) + tm.assert_series_equal(result, expected) + + arr = np.array([1]*5,dtype='timedelta64[D]') + result = to_timedelta(arr) + expected = Series([ np.timedelta64(1,'D') ]*5) + tm.assert_series_equal(result, expected) + + # these will error + self.assertRaises(ValueError, lambda : to_timedelta(['1h'])) + self.assertRaises(ValueError, lambda : to_timedelta(['1m'])) + def test_to_timedelta_via_apply(self): _skip_if_numpy_not_friendly() diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py index 4a522d9874c4f..78dbd246648c8 100644 --- a/pandas/tseries/timedeltas.py +++ b/pandas/tseries/timedeltas.py @@ -8,7 +8,7 @@ import numpy as np import pandas.tslib as tslib from pandas import compat, _np_version_under1p7 -from pandas.core.common import (ABCSeries, is_integer, is_timedelta64_dtype, +from pandas.core.common import (ABCSeries, is_integer, is_integer_dtype, is_timedelta64_dtype, _values_from_object, is_list_like, isnull) repr_timedelta = tslib.repr_timedelta64 @@ -23,7 +23,7 @@ def to_timedelta(arg, box=True, unit='ns'): arg : string, timedelta, array of strings (with possible NAs) box : boolean, default True If True returns a Series of the results, if False returns ndarray of values - unit : unit of the arg (D,s,ms,us,ns) denote the unit, which is an integer/float number + unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, which is an integer/float number Returns ------- @@ -32,18 +32,22 @@ def to_timedelta(arg, box=True, unit='ns'): if _np_version_under1p7: raise ValueError("to_timedelta is not support for numpy < 1.7") - def _convert_listlike(arg, box): + def _convert_listlike(arg, box, unit): if isinstance(arg, (list,tuple)): arg = np.array(arg, dtype='O') if is_timedelta64_dtype(arg): - if box: - from pandas import Series - return Series(arg,dtype='m8[ns]') - return 
arg + value = arg.astype('timedelta64[ns]') + elif is_integer_dtype(arg): + # these are shortcutable + value = arg.astype('timedelta64[{0}]'.format(unit)).astype('timedelta64[ns]') + else: + try: + value = tslib.array_to_timedelta64(_ensure_object(arg),unit=unit) + except: + value = np.array([ _coerce_scalar_to_timedelta_type(r, unit=unit) for r in arg ]) - value = np.array([ _coerce_scalar_to_timedelta_type(r, unit=unit) for r in arg ]) if box: from pandas import Series value = Series(value,dtype='m8[ns]') @@ -53,10 +57,10 @@ def _convert_listlike(arg, box): return arg elif isinstance(arg, ABCSeries): from pandas import Series - values = _convert_listlike(arg.values, box=False) + values = _convert_listlike(arg.values, box=False, unit=unit) return Series(values, index=arg.index, name=arg.name, dtype='m8[ns]') elif is_list_like(arg): - return _convert_listlike(arg, box=box) + return _convert_listlike(arg, box=box, unit=unit) # ...so it must be a scalar value. Return scalar. return _coerce_scalar_to_timedelta_type(arg, unit=unit) @@ -139,7 +143,7 @@ def convert(r=None, unit=None, m=m): return convert # no converter - raise ValueError("cannot create timedelta string converter") + raise ValueError("cannot create timedelta string converter for [{0}]".format(r)) def _possibly_cast_to_timedelta(value, coerce=True): """ try to cast to timedelta64, if already a timedeltalike, then make diff --git a/vb_suite/suite.py b/vb_suite/suite.py index 1b845e88a9d79..03f85da698ff8 100644 --- a/vb_suite/suite.py +++ b/vb_suite/suite.py @@ -26,6 +26,7 @@ 'reshape', 'stat_ops', 'timeseries', + 'timedelta', 'eval'] by_module = {} diff --git a/vb_suite/timedelta.py b/vb_suite/timedelta.py new file mode 100644 index 0000000000000..febd70739b2c9 --- /dev/null +++ b/vb_suite/timedelta.py @@ -0,0 +1,32 @@ +from vbench.api import Benchmark +from datetime import datetime + +common_setup = """from pandas_vb_common import * +from pandas import to_timedelta +""" + 
+#---------------------------------------------------------------------- +# conversion + +setup = common_setup + """ +arr = np.random.randint(0,1000,size=10000) +""" + +stmt = "to_timedelta(arr,unit='s')" +timedelta_convert_int = Benchmark(stmt, setup, start_date=datetime(2014, 1, 1)) + +setup = common_setup + """ +arr = np.random.randint(0,1000,size=10000) +arr = [ '{0} days'.format(i) for i in arr ] +""" + +stmt = "to_timedelta(arr)" +timedelta_convert_string = Benchmark(stmt, setup, start_date=datetime(2014, 1, 1)) + +setup = common_setup + """ +arr = np.random.randint(0,60,size=10000) +arr = [ '00:00:{0:02d}'.format(i) for i in arr ] +""" + +stmt = "to_timedelta(arr)" +timedelta_convert_string_seconds = Benchmark(stmt, setup, start_date=datetime(2014, 1, 1)) diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py index fafa7f75501d9..ccd4bd7ae371a 100644 --- a/vb_suite/timeseries.py +++ b/vb_suite/timeseries.py @@ -278,7 +278,7 @@ def date_range(start=None, end=None, periods=None, freq=None): """ datetimeindex_converter = \ - Benchmark('DatetimeConverter.convert(rng, None, None)', + Benchmark('DatetimeConverter.convert(rng, None, None)', setup, start_date=datetime(2013, 1, 1)) # Adding custom business day
``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- timedelta_convert | 0.2253 | 71.0374 | 0.0032 | timedelta_convert_string_seconds | 186.8856 | 184.1813 | 1.0147 | timedelta_convert_string | 185.8193 | 181.2983 | 1.0249 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 ``` Using shortcuts for integers arrays
https://api.github.com/repos/pandas-dev/pandas/pulls/6754
2014-04-01T12:15:16Z
2014-04-01T13:03:04Z
2014-04-01T13:03:04Z
2014-06-12T08:05:49Z
BUG: fix NDFrame.as_blocks() for sparse containers
diff --git a/doc/source/release.rst b/doc/source/release.rst index c0d4c0c73296f..5e0593a2beec4 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -296,6 +296,7 @@ Bug Fixes - Bug in consistency of groupby aggregation when passing a custom function (:issue:`6715`) - Bug in resample when ``how=None`` resample freq is the same as the axis frequency (:issue:`5955`) - Bug in downcasting inference with empty arrays (:issue:`6733`) +- Bug in ``obj.blocks`` on sparse containers dropping all but the last items of same for dtype (:issue:`6748`) pandas 0.13.1 ------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fc7883f789703..38f4ba0a25d07 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2003,7 +2003,7 @@ def ftypes(self): return Series(self._data.get_ftypes(), index=self._info_axis, dtype=np.object_) - def as_blocks(self, columns=None): + def as_blocks(self): """ Convert the frame to a dict of dtype -> Constructor Types that each has a homogeneous dtype. @@ -2025,12 +2025,18 @@ def as_blocks(self, columns=None): """ self._consolidate_inplace() - bd = dict() + bd = {} for b in self._data.blocks: - b = b.reindex_items_from(columns or b.items) - bd[str(b.dtype)] = self._constructor( - BlockManager([b], [b.items, self.index])).__finalize__(self) - return bd + bd.setdefault(str(b.dtype), []).append(b) + + result = {} + for dtype, blocks in bd.items(): + # Must combine even after consolidation, because there may be + # sparse items which are never consolidated into one block. 
+ combined = self._data.combine(blocks, copy=True) + result[dtype] = self._constructor(combined).__finalize__(self) + + return result @property def blocks(self): diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py index 030fe5fb821c4..7696353dca6f1 100644 --- a/pandas/sparse/tests/test_sparse.py +++ b/pandas/sparse/tests/test_sparse.py @@ -1515,6 +1515,14 @@ def test_sparse_pow_issue(self): self.assertEqual(len(r2.sp_values), len(r1.sp_values)) + def test_as_blocks(self): + df = SparseDataFrame({'A': [1.1, 3.3], 'B': [nan, -3.9]}, + dtype='float64') + + df_blocks = df.blocks + self.assertEqual(list(df_blocks.keys()), ['float64']) + assert_frame_equal(df_blocks['float64'], df) + def _dense_series_compare(s, f): result = f(s)
SparseBlocks don't consolidate, so previous implementation silently dropped all but the last blocks for given dtype: ``` python In [1]: pd.__version__ Out[1]: '0.13.1-527-g73506cb' In [2]: pd.SparseDataFrame({'a': [1,2,3, np.nan, np.nan], 'b': [1,2,3, np.nan, np.nan]}) Out[2]: a b 0 1 1 1 2 2 2 3 3 3 NaN NaN 4 NaN NaN [5 rows x 2 columns] In [3]: _2.blocks Out[3]: {'float64': b 0 1 1 2 2 3 3 NaN 4 NaN [5 rows x 1 columns]} ``` when the last output should be: ``` python In [3]: _2.blocks Out[3]: {'float64': a b 0 1 1 1 2 2 2 3 3 3 NaN NaN 4 NaN NaN [5 rows x 2 columns]} ``` This also drops the `columns` kwarg of as_blocks since it doubles reindex functionality.
https://api.github.com/repos/pandas-dev/pandas/pulls/6748
2014-03-31T08:33:07Z
2014-03-31T13:33:15Z
2014-03-31T13:33:15Z
2014-06-12T21:22:30Z
Quick shift fixes
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fc7883f789703..67f8694925dad 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3238,9 +3238,9 @@ def shift(self, periods=1, freq=None, axis=0, **kwds): if periods == 0: return self - axis = self._get_axis_number(axis) + block_axis = self._get_block_manager_axis(axis) if freq is None and not len(kwds): - new_data = self._data.shift(periods=periods, axis=axis) + new_data = self._data.shift(periods=periods, axis=block_axis) else: return self.tshift(periods, freq, **kwds) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index d32664559f7fc..ed8cfb59bc995 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -966,7 +966,12 @@ def shift(self, periods, axis=0): # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also new_values, fill_value = com._maybe_upcast(self.values) - new_values = np.roll(new_values.T,periods,axis=axis) + # make sure array sent to np.roll is c_contiguous + f_ordered = new_values.flags.f_contiguous + if f_ordered: + new_values = new_values.T + axis = new_values.ndim - axis - 1 + new_values = np.roll(new_values, periods, axis=axis) axis_indexer = [ slice(None) ] * self.ndim if periods > 0: axis_indexer[axis] = slice(None,periods) @@ -974,7 +979,11 @@ def shift(self, periods, axis=0): axis_indexer[axis] = slice(periods,None) new_values[tuple(axis_indexer)] = fill_value - return [make_block(new_values.T, self.items, self.ref_items, + # restore original order + if f_ordered: + new_values = new_values.T + + return [make_block(new_values, self.items, self.ref_items, ndim=self.ndim, fastpath=True)] def eval(self, func, other, raise_on_error=True, try_cast=False): diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index 7f9063003191f..1c0e9086c63ee 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -386,7 +386,7 @@ def test_equal(name): def test_unequal(name): 
df, df2 = pairs[name] return df.equals(df2) - + float_df = DataFrame(np.random.randn(1000, 1000)) object_df = DataFrame([['foo']*1000]*1000) nonunique_cols = object_df.copy() @@ -434,11 +434,21 @@ def test_unequal(name): # frame shift speedup issue-5609 setup = common_setup + """ -df = pd.DataFrame(np.random.rand(10000,500)) +df = DataFrame(np.random.rand(10000,500)) +# note: df._data.blocks are f_contigous """ frame_shift_axis0 = Benchmark('df.shift(1,axis=0)', setup, - name = 'frame_shift_axis_0', start_date=datetime(2014,1,1)) frame_shift_axis1 = Benchmark('df.shift(1,axis=1)', setup, - name = 'frame_shift_axis_1', - start_date=datetime(2014,1,1)) \ No newline at end of file + start_date=datetime(2014,1,1)) + +# +setup = common_setup + """ +df = DataFrame(np.random.rand(10000,500)) +df = df.consolidate() +# note: df._data.blocks are c_contigous +""" +frame_shift_c_order_axis0 = Benchmark('df.shift(1,axis=0)', setup, + start_date=datetime(2014,1,1)) +frame_shift_c_order_axis1 = Benchmark('df.shift(1,axis=1)', setup, + start_date=datetime(2014,1,1))
Adjustments to the quick fix PR. https://gist.github.com/dalejung/9731798#file-vb_suite-log ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- frame_shift_c_order_axis1 | 14.4524 | 52.8037 | 0.2737 | frame_shift_c_order_axis0 | 16.9516 | 46.5320 | 0.3643 | frame_shift_axis0 | 15.2656 | 16.3440 | 0.9340 | frame_shift_axis1 | 22.2600 | 22.0567 | 1.0092 | ``` Note, that the speed up was really in sending a C order array to `np.take`. You'll notice that master is slow for `frame_shift_c_order_` which are tests where the `block.values` is c_contiguous. Thus current master always transposes. The original example had it's speed up because it `np.roll`ed `df.values` which is C ordered (fast) while the `.blocks` were were (slow).
https://api.github.com/repos/pandas-dev/pandas/pulls/6747
2014-03-31T02:45:19Z
2014-03-31T09:41:59Z
2014-03-31T09:41:59Z
2014-07-16T08:59:58Z
Doc fixes
diff --git a/doc/source/io.rst b/doc/source/io.rst index 4d97c43e85de8..5856b4e293259 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1878,14 +1878,14 @@ to be parsed. .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', parse_cols=2, index_col=None, na_values=['NA']) + read_excel('path_to_file.xls', 'Sheet1', parse_cols=2) If `parse_cols` is a list of integers, then it is assumed to be the file column indices to be parsed. .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', parse_cols=[0, 2, 3], index_col=None, na_values=['NA']) + read_excel('path_to_file.xls', 'Sheet1', parse_cols=[0, 2, 3]) To write a DataFrame object to a sheet of an Excel file, you can use the ``to_excel`` instance method. The arguments are largely the same as ``to_csv`` diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 274a2341c1a9f..b872c8a60e34e 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -199,9 +199,9 @@ the right thing: Reshaping by Melt ----------------- -The ``melt`` function found in ``pandas.core.reshape`` is useful to massage a +The :func:`~pandas.melt` function is useful to massage a DataFrame into a format where one or more columns are identifier variables, -while all other columns, considered measured variables, are "pivoted" to the +while all other columns, considered measured variables, are "unpivoted" to the row axis, leaving just two non-identifier columns, "variable" and "value". The names of those columns can be customized by supplying the ``var_name`` and ``value_name`` parameters. diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 10236f6b2e191..d05ae4b72c2f1 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -261,7 +261,7 @@ bar plot: .. ipython:: python :suppress: - plt.figure(); + plt.figure() .. ipython:: python @@ -275,7 +275,7 @@ To produce a stacked bar plot, pass ``stacked=True``: .. 
ipython:: python :suppress: - plt.figure(); + plt.figure() .. ipython:: python @@ -287,7 +287,7 @@ To get horizontal bar plots, pass ``kind='barh'``: .. ipython:: python :suppress: - plt.figure(); + plt.figure() .. ipython:: python @@ -320,7 +320,7 @@ New since 0.10.0, the ``by`` keyword can be specified to plot grouped histograms .. ipython:: python :suppress: - plt.figure(); + plt.figure() .. ipython:: python @@ -434,12 +434,12 @@ Scatter plot matrix .. _visualization.kde: *New in 0.8.0* You can create density plots using the Series/DataFrame.plot and -setting `kind='kde'`: +setting ``kind='kde'``: .. ipython:: python :suppress: - plt.figure(); + plt.figure() .. ipython:: python @@ -460,7 +460,7 @@ too dense to plot each point individually. .. ipython:: python :suppress: - plt.figure(); + plt.figure() .. ipython:: python @@ -486,7 +486,7 @@ given by column ``z``. The bins are aggregated with numpy's ``max`` function. .. ipython:: python :suppress: - plt.figure(); + plt.figure() .. ipython:: python diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index f5ca96e2d827e..0d06e9253ce1f 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -617,16 +617,34 @@ def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None): """ "Unpivots" a DataFrame from wide format to long format, optionally leaving - id variables set + identifier variables set. + + This function is useful to massage a DataFrame into a format where one + or more columns are identifier variables (`id_vars`), while all other + columns, considered measured variables (`value_vars`), are "unpivoted" to + the row axis, leaving just two non-identifier columns, 'variable' and + 'value'. Parameters ---------- frame : DataFrame - id_vars : tuple, list, or ndarray - value_vars : tuple, list, or ndarray - var_name : scalar, if None uses frame.column.name or 'variable' + id_vars : tuple, list, or ndarray, optional + Column(s) to use as identifier variables. 
+ value_vars : tuple, list, or ndarray, optional + Column(s) to unpivot. If not specified, uses all columns that + are not set as `id_vars`. + var_name : scalar + Name to use for the 'variable' column. If None it uses + ``frame.columns.name`` or 'variable'. value_name : scalar, default 'value' - col_level : scalar, if columns are a MultiIndex then use this level to melt + Name to use for the 'value' column. + col_level : int or string, optional + If columns are a MultiIndex then use this level to melt. + + See also + -------- + pivot_table + DataFrame.pivot Examples -------- @@ -634,35 +652,53 @@ def melt(frame, id_vars=None, value_vars=None, >>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, ... 'B': {0: 1, 1: 3, 2: 5}, ... 'C': {0: 2, 1: 4, 2: 6}}) - >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 - >>> melt(df, id_vars=['A'], value_vars=['B']) + >>> pd.melt(df, id_vars=['A'], value_vars=['B']) + A variable value + 0 a B 1 + 1 b B 3 + 2 c B 5 + + >>> pd.melt(df, id_vars=['A'], value_vars=['B', 'C']) A variable value 0 a B 1 1 b B 3 2 c B 5 + 3 a C 2 + 4 b C 4 + 5 c C 6 + + The names of 'variable' and 'value' columns can be customized: - >>> melt(df, id_vars=['A'], value_vars=['B'], - ... var_name='myVarname', value_name='myValname') + >>> pd.melt(df, id_vars=['A'], value_vars=['B'], + ... var_name='myVarname', value_name='myValname') A myVarname myValname 0 a B 1 1 b B 3 2 c B 5 + If you have multi-index columns: + >>> df.columns = [list('ABC'), list('DEF')] + >>> df + A B C + D E F + 0 a 1 2 + 1 b 3 4 + 2 c 5 6 - >>> melt(df, col_level=0, id_vars=['A'], value_vars=['B']) + >>> pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B']) A variable value 0 a B 1 1 b B 3 2 c B 5 - >>> melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')]) + >>> pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')]) (A, D) variable_0 variable_1 value 0 a B E 1 1 b B E 3
https://api.github.com/repos/pandas-dev/pandas/pulls/6746
2014-03-30T23:02:37Z
2014-04-01T08:51:41Z
2014-04-01T08:51:41Z
2014-07-12T16:41:14Z
CLN: revisit & simplify Block/BlockManager, remove axes
diff --git a/pandas/core/format.py b/pandas/core/format.py index 6d0b0596d08d2..43eb0e890aa62 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -1024,9 +1024,8 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None, # preallocate data 2d list self.blocks = self.obj._data.blocks - ncols = sum(len(b.items) for b in self.blocks) + ncols = sum(b.shape[0] for b in self.blocks) self.data = [None] * ncols - self.column_map = self.obj._data.get_items_map(use_cached=False) if chunksize is None: chunksize = (100000 / (len(self.cols) or 1)) or 1 @@ -1293,10 +1292,9 @@ def _save_chunk(self, start_i, end_i): float_format=self.float_format, date_format=self.date_format) - for i, item in enumerate(b.items): - + for col_loc, col in zip(b.mgr_locs, d): # self.data is a preallocated list - self.data[self.column_map[b][i]] = d[i] + self.data[col_loc] = col ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep, float_format=self.float_format, diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 23736dafe3556..fcd2e65afddcb 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1043,9 +1043,11 @@ def to_panel(self): new_blocks = [] for block in selfsorted._data.blocks: - newb = block2d_to_blocknd(block.values.T, block.items, shape, - [major_labels, minor_labels], - ref_items=selfsorted.columns) + newb = block2d_to_blocknd( + values=block.values.T, + placement=block.mgr_locs, shape=shape, + labels=[major_labels, minor_labels], + ref_items=selfsorted.columns) new_blocks.append(newb) # preserve names, if any @@ -1934,7 +1936,9 @@ def _ensure_valid_index(self, value): raise ValueError('Cannot set a frame with no defined index ' 'and a value that cannot be converted to a ' 'Series') - self._data.set_axis(1, value.index.copy(), check_axis=False) + + self._data = self._data.reindex_axis(value.index.copy(), axis=1, + fill_value=np.nan) # we are a scalar # noop @@ -2039,7 +2043,11 @@ def _sanitize_column(self, key, 
value): @property def _series(self): - return self._data.get_series_dict() + result = {} + for idx, item in enumerate(self.columns): + result[item] = Series(self._data.iget(idx), index=self.index, + name=item) + return result def lookup(self, row_labels, col_labels): """Label-based "fancy indexing" function for DataFrame. @@ -2629,16 +2637,14 @@ def trans(v): indexer = _nargsort(labels, kind=kind, ascending=ascending, na_position=na_position) + bm_axis = self._get_block_manager_axis(axis) + new_data = self._data.take(indexer, axis=bm_axis, + convert=False, verify=False) + if inplace: - if axis == 1: - new_data = self._data.reindex_items( - self._data.items[indexer], - copy=False) - elif axis == 0: - new_data = self._data.take(indexer) - self._update_inplace(new_data) + return self._update_inplace(new_data) else: - return self.take(indexer, axis=axis, convert=False, is_copy=False) + return self._constructor(new_data).__finalize__(self) def sortlevel(self, level=0, axis=0, ascending=True, inplace=False): """ @@ -2673,16 +2679,13 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False): else: return self.take(indexer, axis=axis, convert=False) + bm_axis = self._get_block_manager_axis(axis) + new_data = self._data.take(indexer, axis=bm_axis, + convert=False, verify=False) if inplace: - if axis == 1: - new_data = self._data.reindex_items( - self._data.items[indexer], - copy=False) - elif axis == 0: - new_data = self._data.take(indexer) - self._update_inplace(new_data) + return self._update_inplace(new_data) else: - return self.take(indexer, axis=axis, convert=False, is_copy=False) + return self._constructor(new_data).__finalize__(self) def swaplevel(self, i, j, axis=0): """ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d894289c87eee..3f2ecd8afd2d4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -565,7 +565,7 @@ def f(x): f = _get_rename_function(v) baxis = self._get_block_manager_axis(axis) - result._data = 
result._data.rename(f, axis=baxis, copy=copy) + result._data = result._data.rename_axis(f, axis=baxis, copy=copy) result._clear_item_cache() if inplace: @@ -1217,21 +1217,9 @@ def take(self, indices, axis=0, convert=True, is_copy=True): taken : type of caller """ - # check/convert indicies here - if convert: - axis = self._get_axis_number(axis) - indices = _maybe_convert_indices( - indices, len(self._get_axis(axis))) - - baxis = self._get_block_manager_axis(axis) - if baxis == 0: - labels = self._get_axis(axis) - new_items = labels.take(indices) - new_data = self._data.reindex_axis(new_items, indexer=indices, - axis=baxis) - else: - new_data = self._data.take(indices, axis=baxis) - + new_data = self._data.take(indices, + axis=self._get_block_manager_axis(axis), + convert=True, verify=True) result = self._constructor(new_data).__finalize__(self) # maybe set copy if we didn't actually change the index @@ -1701,7 +1689,7 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, labels, method, level, limit=limit, copy_if_needed=True) return self._reindex_with_indexers( {axis: [new_index, indexer]}, method=method, fill_value=fill_value, - limit=limit, copy=copy).__finalize__(self) + limit=limit, copy=copy) def _reindex_with_indexers(self, reindexers, method=None, fill_value=np.nan, limit=None, copy=False, @@ -1716,30 +1704,16 @@ def _reindex_with_indexers(self, reindexers, method=None, if index is None: continue - index = _ensure_index(index) - # reindex the axis - if method is not None: - new_data = new_data.reindex_axis( - index, indexer=indexer, method=method, axis=baxis, - fill_value=fill_value, limit=limit, copy=copy) - - elif indexer is not None: - # TODO: speed up on homogeneous DataFrame objects + index = _ensure_index(index) + if indexer is not None: indexer = com._ensure_int64(indexer) - new_data = new_data.reindex_indexer(index, indexer, axis=baxis, - fill_value=fill_value, - allow_dups=allow_dups) - - elif (baxis == 0 and index is not 
None and - index is not new_data.axes[baxis]): - new_data = new_data.reindex_items(index, copy=copy, - fill_value=fill_value) - - elif (baxis > 0 and index is not None and - index is not new_data.axes[baxis]): - new_data = new_data.copy(deep=copy) - new_data.set_axis(baxis, index) + + # TODO: speed up on homogeneous DataFrame objects + new_data = new_data.reindex_indexer(index, indexer, axis=baxis, + fill_value=fill_value, + allow_dups=allow_dups, + copy=copy) if copy and new_data is self._data: new_data = new_data.copy() diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index c0222ad248e0c..f650b41ff12be 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2196,10 +2196,10 @@ def _iterate_slices(self): yield val, slicer(val) def _cython_agg_general(self, how, numeric_only=True): - new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only) - return self._wrap_agged_blocks(new_blocks) + new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only) + return self._wrap_agged_blocks(new_items, new_blocks) - def _wrap_agged_blocks(self, blocks): + def _wrap_agged_blocks(self, items, blocks): obj = self._obj_with_exclusions new_axes = list(obj._data.axes) @@ -2210,6 +2210,10 @@ def _wrap_agged_blocks(self, blocks): else: new_axes[self.axis] = self.grouper.result_index + # Make sure block manager integrity check passes. 
+ assert new_axes[0].equals(items) + new_axes[0] = items + mgr = BlockManager(blocks, new_axes) new_obj = type(obj)(mgr) @@ -2223,14 +2227,14 @@ def _cython_agg_blocks(self, how, numeric_only=True): new_blocks = [] + if numeric_only: + data = data.get_numeric_data(copy=False) + for block in data.blocks: values = block.values is_numeric = is_numeric_dtype(values.dtype) - if numeric_only and not is_numeric: - continue - if is_numeric: values = com.ensure_float(values) @@ -2239,13 +2243,13 @@ def _cython_agg_blocks(self, how, numeric_only=True): # see if we can cast the block back to the original dtype result = block._try_cast_result(result) - newb = make_block(result, block.items, block.ref_items) + newb = make_block(result, placement=block.mgr_locs) new_blocks.append(newb) if len(new_blocks) == 0: raise DataError('No numeric types to aggregate') - return new_blocks + return data.items, new_blocks def _get_data_to_aggregate(self): obj = self._obj_with_exclusions @@ -2837,28 +2841,10 @@ def _wrap_aggregated_output(self, output, names=None): return result.convert_objects() - def _wrap_agged_blocks(self, blocks): - obj = self._obj_with_exclusions - - if self.axis == 0: - agg_labels = obj.columns - else: - agg_labels = obj.index - - if sum(len(x.items) for x in blocks) == len(agg_labels): - output_keys = agg_labels - else: - all_items = [] - for b in blocks: - all_items.extend(b.items) - output_keys = agg_labels[agg_labels.isin(all_items)] - - for blk in blocks: - blk.set_ref_items(output_keys, maybe_rename=False) - + def _wrap_agged_blocks(self, items, blocks): if not self.as_index: index = np.arange(blocks[0].values.shape[1]) - mgr = BlockManager(blocks, [output_keys, index]) + mgr = BlockManager(blocks, [items, index]) result = DataFrame(mgr) group_levels = self.grouper.get_group_levels() @@ -2869,7 +2855,7 @@ def _wrap_agged_blocks(self, blocks): result = result.consolidate() else: index = self.grouper.result_index - mgr = BlockManager(blocks, [output_keys, index]) + 
mgr = BlockManager(blocks, [items, index]) result = DataFrame(mgr) if self.axis == 1: diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 792a310c8a554..7465fad39496c 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1,30 +1,39 @@ +import copy import itertools import re import operator from datetime import datetime, timedelta -import copy -from collections import defaultdict +from collections import defaultdict, deque import numpy as np from pandas.core.base import PandasObject +from pandas.hashtable import Factorizer from pandas.core.common import (_possibly_downcast_to_dtype, isnull, notnull, _NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like, ABCSparseSeries, _infer_dtype_from_scalar, - _values_from_object, _is_null_datelike_scalar) -from pandas.core.index import Index, MultiIndex, _ensure_index + _is_null_datelike_scalar, + is_timedelta64_dtype, is_datetime64_dtype,) +from pandas.core.index import Index, Int64Index, MultiIndex, _ensure_index from pandas.core.indexing import (_maybe_convert_indices, _length_of_indexer) import pandas.core.common as com from pandas.sparse.array import _maybe_to_sparse, SparseArray import pandas.lib as lib import pandas.tslib as tslib import pandas.computation.expressions as expressions +from pandas.util.decorators import cache_readonly from pandas.tslib import Timestamp from pandas import compat -from pandas.compat import range, lrange, lmap, callable, map, zip, u +from pandas.compat import (range, lrange, lmap, callable, map, zip, u, + OrderedDict) from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type + + +from pandas.lib import BlockPlacement + + class Block(PandasObject): """ @@ -33,7 +42,7 @@ class Block(PandasObject): Index-ignorant; let the container take care of that """ - __slots__ = ['items', 'ref_items', '_ref_locs', 'values', 'ndim'] + __slots__ = ['_mgr_locs', 'values', 'ndim'] is_numeric = False is_float = False is_integer = False @@ -49,29 +58,20 @@ class 
Block(PandasObject): _verify_integrity = True _ftype = 'dense' - def __init__(self, values, items, ref_items, ndim=None, fastpath=False, - placement=None): - + def __init__(self, values, placement, ndim=None, fastpath=False): if ndim is None: ndim = values.ndim - - if values.ndim != ndim: + elif values.ndim != ndim: raise ValueError('Wrong number of dimensions') + self.ndim = ndim - if len(items) != len(values): - raise ValueError('Wrong number of items passed %d, index implies ' - '%d' % (len(values), len(items))) - - self.set_ref_locs(placement) + self.mgr_locs = placement self.values = values - self.ndim = ndim - if fastpath: - self.items = items - self.ref_items = ref_items - else: - self.items = _ensure_index(items) - self.ref_items = _ensure_index(ref_items) + if len(self.mgr_locs) != len(self.values): + raise ValueError('Wrong number of items passed %d,' + ' placement implies %d' % ( + len(self.values), len(self.mgr_locs))) @property def _consolidate_key(self): @@ -91,79 +91,28 @@ def fill_value(self): return np.nan @property - def ref_locs(self): - if self._ref_locs is None: - # we have a single block, maybe have duplicates - # but indexer is easy - # also if we are not really reindexing, just numbering - if self._is_single_block or self.ref_items.equals(self.items): - indexer = np.arange(len(self.items)) - else: - - indexer = self.ref_items.get_indexer(self.items) - indexer = com._ensure_platform_int(indexer) - if (indexer == -1).any(): + def mgr_locs(self): + return self._mgr_locs - # this means that we have nan's in our block - try: - indexer[indexer == -1] = np.arange( - len(self.items))[isnull(self.items)] - except: - raise AssertionError('Some block items were not in ' - 'block ref_items') - - self._ref_locs = indexer - return self._ref_locs - - def take_ref_locs(self, indexer): - """ - need to preserve the ref_locs and just shift them - return None if ref_locs is None - - see GH6509 + def make_block_same_class(self, values, placement, copy=False, + 
**kwargs): """ + Wrap given values in a block of same type as self. - ref_locs = self._ref_locs - if ref_locs is None: - return None - - tindexer = np.ones(len(ref_locs),dtype=bool) - tindexer[indexer] = False - tindexer = tindexer.astype(int).cumsum()[indexer] - ref_locs = ref_locs[indexer] - - # Make sure the result is a copy, or otherwise self._ref_locs will be - # updated. - if ref_locs.base is not None: - ref_locs = ref_locs.copy() - - ref_locs -= tindexer - return ref_locs + `kwargs` are used in SparseBlock override. - def reset_ref_locs(self): - """ reset the block ref_locs """ - self._ref_locs = np.empty(len(self.items), dtype='int64') - - def set_ref_locs(self, placement): - """ explicity set the ref_locs indexer, only necessary for duplicate - indicies """ - if placement is None: - self._ref_locs = None - else: - self._ref_locs = np.array(placement, dtype='int64', copy=True) + if copy: + values = values.copy() + return make_block(values, placement, klass=self.__class__, + fastpath=True) - def set_ref_items(self, ref_items, maybe_rename=True): - """ - If maybe_rename=True, need to set the items for this guy - """ - if not isinstance(ref_items, Index): - raise AssertionError('block ref_items must be an Index') - if maybe_rename == 'clear': - self._ref_locs = None - elif maybe_rename: - self.items = ref_items.take(self.ref_locs) - self.ref_items = ref_items + @mgr_locs.setter + def mgr_locs(self, new_mgr_locs): + if not isinstance(new_mgr_locs, BlockPlacement): + new_mgr_locs = BlockPlacement(new_mgr_locs) + + self._mgr_locs = new_mgr_locs def __unicode__(self): @@ -178,32 +127,47 @@ def __unicode__(self): shape = ' x '.join([com.pprint_thing(s) for s in self.shape]) result = '%s: %s, %s, dtype: %s' % ( - name, com.pprint_thing(self.items), shape, self.dtype) + name, com.pprint_thing(self.mgr_locs.indexer), shape, + self.dtype) return result - def __contains__(self, item): - return item in self.items - def __len__(self): return len(self.values) def 
__getstate__(self): - # should not pickle generally (want to share ref_items), but here for - # completeness - return (self.items, self.ref_items, self.values) + return self.mgr_locs.indexer, self.values def __setstate__(self, state): - items, ref_items, values = state - self.items = _ensure_index(items) - self.ref_items = _ensure_index(ref_items) - self.values = values - self.ndim = values.ndim + self.mgr_locs = BlockPlacement(state[0]) + self.values = state[1] + self.ndim = self.values.ndim def _slice(self, slicer): """ return a slice of my values """ return self.values[slicer] + def getitem_block(self, slicer, new_mgr_locs=None): + """ + Perform __getitem__-like, return result as block. + + As of now, only supports slices that preserve dimensionality. + + """ + if new_mgr_locs is None: + if isinstance(slicer, tuple): + axis0_slicer = slicer[0] + else: + axis0_slicer = slicer + new_mgr_locs = self.mgr_locs[axis0_slicer] + + new_values = self._slice(slicer) + + if new_values.ndim != self.ndim: + raise ValueError("Only same dim slicing is allowed") + + return self.make_block_same_class(new_values, new_mgr_locs) + @property def shape(self): return self.values.shape @@ -220,22 +184,8 @@ def dtype(self): def ftype(self): return "%s:%s" % (self.dtype, self._ftype) - def as_block(self, result): - """ if we are not a block, then wrap as a block, must have compatible shape """ - if not isinstance(result, Block): - result = make_block(result, - self.items, - self.ref_items) - return result - def merge(self, other): - if not self.ref_items.equals(other.ref_items): - raise AssertionError('Merge operands must have same ref_items') - - # Not sure whether to allow this or not - # if not union_ref.equals(other.ref_items): - # union_ref = self.ref_items + other.ref_items - return _merge_blocks([self, other], self.ref_items) + return _merge_blocks([self, other]) def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, limit=None, mask_info=None): @@ -249,62 +199,9 @@ 
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, new_values = com.take_nd(self.values, indexer, axis, fill_value=fill_value, mask_info=mask_info) - return make_block(new_values, self.items, self.ref_items, + return make_block(new_values, ndim=self.ndim, fastpath=True, - placement=self._ref_locs) - - def reindex_items_from(self, new_ref_items, indexer=None, method=None, - fill_value=None, limit=None, copy=True): - """ - Reindex to only those items contained in the input set of items - - E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'], - then the resulting items will be ['b'] - - Returns - ------- - reindexed : Block - """ - if indexer is None: - new_ref_items, indexer = self.items.reindex(new_ref_items, - limit=limit) - - needs_fill = method is not None - if fill_value is None: - fill_value = self.fill_value - - new_items = new_ref_items - if indexer is None: - new_values = self.values.copy() if copy else self.values - - else: - - # single block reindex, filling is already happending - if self.ndim == 1: - new_values = com.take_1d(self.values, indexer, - fill_value=fill_value) - block = make_block(new_values, new_items, new_ref_items, - ndim=self.ndim, fastpath=True) - return block - else: - - masked_idx = indexer[indexer != -1] - new_items = self.items.take(masked_idx) - new_values = com.take_nd(self.values, masked_idx, axis=0, - allow_fill=False) - # fill if needed - if needs_fill: - new_values = com.interpolate_2d(new_values, method=method, - limit=limit, fill_value=fill_value) - - block = make_block(new_values, new_items, new_ref_items, - ndim=self.ndim, fastpath=True) - - # down cast if needed - if not self.is_float and (needs_fill or notnull(fill_value)): - block = block.downcast() - - return block + placement=self.mgr_locs) def get(self, item): loc = self.items.get_loc(item) @@ -313,7 +210,7 @@ def get(self, item): def iget(self, i): return self.values[i] - def set(self, item, value, check=False): + def set(self, locs, 
values, check=False): """ Modify Block in-place with new item value @@ -321,50 +218,22 @@ def set(self, item, value, check=False): ------- None """ - loc = self.items.get_loc(item) - self.values[loc] = value - - def delete(self, item): - """ - Returns - ------- - y : Block (new object) - """ - loc = self.items.get_loc(item) - new_items = self.items.delete(loc) - new_values = np.delete(self.values, loc, 0) - return make_block(new_values, new_items, self.ref_items, - ndim=self.ndim, klass=self.__class__, fastpath=True) + self.values[locs] = values - def split_block_at(self, item): + def delete(self, loc): """ - Split block into zero or more blocks around columns with given label, - for "deleting" a column without having to copy data by returning views - on the original array. - - Returns - ------- - generator of Block + Delete given loc(-s) from block in-place. """ - loc = self.items.get_loc(item) - - if type(loc) == slice or type(loc) == int: - mask = [True] * len(self) - mask[loc] = False - else: # already a mask, inverted - mask = -loc - - for s, e in com.split_ranges(mask): - yield make_block(self.values[s:e], - self.items[s:e].copy(), - self.ref_items, - ndim=self.ndim, - klass=self.__class__, - fastpath=True) + self.values = np.delete(self.values, loc, 0) + self.mgr_locs = self.mgr_locs.delete(loc) def apply(self, func, **kwargs): """ apply the function to my values; return a block if we are not one """ - return self.as_block(func(self.values)) + result = func(self.values) + if not isinstance(result, Block): + result = make_block(values=result, placement=self.mgr_locs,) + + return result def fillna(self, value, limit=None, inplace=False, downcast=None): if not self._can_hold_na: @@ -415,8 +284,8 @@ def downcast(self, dtypes=None): dtypes = 'infer' nv = _possibly_downcast_to_dtype(values, dtypes) - return [make_block(nv, self.items, self.ref_items, ndim=self.ndim, - fastpath=True)] + return [make_block(nv, ndim=self.ndim, + fastpath=True, 
placement=self.mgr_locs)] # ndim > 1 if dtypes is None: @@ -429,11 +298,12 @@ def downcast(self, dtypes=None): # item-by-item # this is expensive as it splits the blocks items-by-item blocks = [] - for i, item in enumerate(self.items): + for i, rl in enumerate(self.mgr_locs): if dtypes == 'infer': dtype = 'infer' else: + raise AssertionError("dtypes as dict is not supported yet") dtype = dtypes.get(item, self._downcast_dtype) if dtype is None: @@ -442,8 +312,9 @@ def downcast(self, dtypes=None): nv = _possibly_downcast_to_dtype(values[i], dtype) nv = _block_shape(nv, ndim=self.ndim) - blocks.append(make_block(nv, Index([item]), self.ref_items, - ndim=self.ndim, fastpath=True)) + blocks.append(make_block(nv, + ndim=self.ndim, fastpath=True, + placement=[rl])) return blocks @@ -466,9 +337,11 @@ def _astype(self, dtype, copy=False, raise_on_error=True, values=None, try: # force the copy here if values is None: - values = com._astype_nansafe(self.values, dtype, copy=True) - newb = make_block(values, self.items, self.ref_items, - ndim=self.ndim, placement=self._ref_locs, + # _astype_nansafe works fine with 1-d only + values = com._astype_nansafe(self.values.ravel(), dtype, copy=True) + values = values.reshape(self.values.shape) + newb = make_block(values, + ndim=self.ndim, placement=self.mgr_locs, fastpath=True, dtype=dtype, klass=klass) except: if raise_on_error is True: @@ -482,7 +355,7 @@ def _astype(self, dtype, copy=False, raise_on_error=True, values=None, "(%s [%s])" % (copy, self.dtype.name, self.itemsize, newb.dtype.name, newb.itemsize)) - return [newb] + return newb def convert(self, copy=True, **kwargs): """ attempt to coerce any object types to better types @@ -491,31 +364,6 @@ def convert(self, copy=True, **kwargs): return [self.copy()] if copy else [self] - def prepare_for_merge(self, **kwargs): - """ a regular block is ok to merge as is """ - return self - - def post_merge(self, items, **kwargs): - """ we are non-sparse block, try to convert to a sparse 
block(s) """ - overlap = set(items.keys()) & set(self.items) - if len(overlap): - overlap = _ensure_index(overlap) - - new_blocks = [] - for item in overlap: - dtypes = set(items[item]) - - # this is a safe bet with multiple dtypes - dtype = list(dtypes)[0] if len(dtypes) == 1 else np.float64 - - b = make_block(SparseArray(self.get(item), dtype=dtype), - [item], self.ref_items) - new_blocks.append(b) - - return new_blocks - - return self - def _can_hold_element(self, value): raise NotImplementedError() @@ -581,15 +429,13 @@ def to_native_types(self, slicer=None, na_rep='', **kwargs): return values.tolist() # block actions #### - def copy(self, deep=True, ref_items=None): + def copy(self, deep=True): values = self.values if deep: values = values.copy() - if ref_items is None: - ref_items = self.ref_items - return make_block(values, self.items, ref_items, ndim=self.ndim, + return make_block(values, ndim=self.ndim, klass=self.__class__, fastpath=True, - placement=self._ref_locs) + placement=self.mgr_locs) def replace(self, to_replace, value, inplace=False, filter=None, regex=False): @@ -599,9 +445,8 @@ def replace(self, to_replace, value, inplace=False, filter=None, compatibility.""" mask = com.mask_missing(self.values, to_replace) if filter is not None: - for i, item in enumerate(self.items): - if item not in filter: - mask[i] = False + filtered_out = ~self.mgr_locs.isin(filter) + mask[filtered_out.nonzero()[0]] = False if not mask.any(): if inplace: @@ -672,8 +517,8 @@ def setitem(self, indexer, value): dtype = 'infer' values = self._try_coerce_result(values) values = self._try_cast_result(values, dtype) - return [make_block(transf(values), self.items, self.ref_items, - ndim=self.ndim, placement=self._ref_locs, + return [make_block(transf(values), + ndim=self.ndim, placement=self.mgr_locs, fastpath=True)] except (ValueError, TypeError) as detail: raise @@ -704,21 +549,11 @@ def putmask(self, mask, new, align=True, inplace=False): # may need to align the new if 
hasattr(new, 'reindex_axis'): - if align: - axis = getattr(new, '_info_axis_number', 0) - new = new.reindex_axis(self.items, axis=axis, - copy=False).values.T - else: - new = new.values.T + new = new.values.T # may need to align the mask if hasattr(mask, 'reindex_axis'): - if align: - axis = getattr(mask, '_info_axis_number', 0) - mask = mask.reindex_axis( - self.items, axis=axis, copy=False).values.T - else: - mask = mask.values.T + mask = mask.values.T # if we are passed a scalar None, convert it here if not is_list_like(new) and isnull(new): @@ -738,45 +573,8 @@ def putmask(self, mask, new, align=True, inplace=False): # need to go column by column new_blocks = [] - - def create_block(v, m, n, item, reshape=True): - """ return a new block, try to preserve dtype if possible """ - - # n should be the length of the mask or a scalar here - if not is_list_like(n): - n = np.array([n] * len(m)) - - # see if we are only masking values that if putted - # will work in the current dtype - nv = None - try: - nn = n[m] - nn_at = nn.astype(self.dtype) - if (nn == nn_at).all(): - nv = v.copy() - nv[mask] = nn_at - except (ValueError, IndexError, TypeError): - pass - - # change the dtype - if nv is None: - dtype, _ = com._maybe_promote(n.dtype) - nv = v.astype(dtype) - try: - nv[m] = n - except ValueError: - idx, = np.where(np.squeeze(m)) - for mask_index, new_val in zip(idx, n): - nv[mask_index] = new_val - - if reshape: - nv = _block_shape(nv) - return make_block(nv, [item], self.ref_items) - else: - return make_block(nv, item, self.ref_items) - if self.ndim > 1: - for i, item in enumerate(self.items): + for i, ref_loc in enumerate(self.mgr_locs): m = mask[i] v = new_values[i] @@ -792,27 +590,31 @@ def create_block(v, m, n, item, reshape=True): # we need to exiplicty astype here to make a copy n = n.astype(dtype) - block = create_block(v, m, n, item) - + nv = _putmask_smart(v, m, n) else: nv = v if inplace else v.copy() - nv = _block_shape(nv) - block = make_block( - nv, 
Index([item]), self.ref_items, fastpath=True) + + # Put back the dimension that was taken from it and make + # a block out of the result. + block = make_block(values=nv[np.newaxis], + placement=[ref_loc], + fastpath=True) new_blocks.append(block) else: - new_blocks.append(create_block(new_values, mask, new, - self.items, reshape=False)) + nv = _putmask_smart(new_values, mask, new) + new_blocks.append(make_block(values=nv, + placement=self.mgr_locs, + fastpath=True)) return new_blocks if inplace: return [self] - return [make_block(new_values, self.items, self.ref_items, - placement=self._ref_locs, fastpath=True)] + return [make_block(new_values, + placement=self.mgr_locs, fastpath=True)] def interpolate(self, method='pad', axis=0, index=None, values=None, inplace=False, limit=None, @@ -891,9 +693,9 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, dtype=self.dtype) values = self._try_coerce_result(values) - blocks = [make_block(values, self.items, self.ref_items, + blocks = [make_block(values, ndim=self.ndim, klass=self.__class__, - fastpath=True)] + fastpath=True, placement=self.mgr_locs)] return self._maybe_downcast(blocks, downcast) def _interpolate(self, method=None, index=None, values=None, @@ -930,36 +732,49 @@ def func(x): # interp each column independently interp_values = np.apply_along_axis(func, axis, data) - blocks = [make_block(interp_values, self.items, self.ref_items, - ndim=self.ndim, klass=self.__class__, fastpath=True)] + blocks = [make_block(interp_values, + ndim=self.ndim, klass=self.__class__, + fastpath=True, placement=self.mgr_locs)] return self._maybe_downcast(blocks, downcast) - def take(self, indexer, ref_items, new_axis, axis=1): - if axis < 1: - raise AssertionError('axis must be at least 1, got %d' % axis) - new_values = com.take_nd(self.values, indexer, axis=axis, - allow_fill=False) + def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): + """ + Take values according to indexer and return them as a 
block.bb - # need to preserve the ref_locs and just shift them - # GH6121 - ref_locs = None - if not new_axis.is_unique: - ref_locs = self._ref_locs + """ + if fill_tuple is None: + fill_value = self.fill_value + new_values = com.take_nd(self.get_values(), indexer, axis=axis, + allow_fill=False) + else: + fill_value = fill_tuple[0] + new_values = com.take_nd(self.get_values(), indexer, axis=axis, + allow_fill=True, fill_value=fill_value) + + if new_mgr_locs is None: + if axis == 0: + slc = lib.indexer_as_slice(indexer) + if slc is not None: + new_mgr_locs = self.mgr_locs[slc] + else: + new_mgr_locs = self.mgr_locs[indexer] + else: + new_mgr_locs = self.mgr_locs - return [make_block(new_values, self.items, ref_items, ndim=self.ndim, - klass=self.__class__, placement=ref_locs, fastpath=True)] + if new_values.dtype != self.dtype: + return make_block(new_values, new_mgr_locs) + else: + return self.make_block_same_class(new_values, new_mgr_locs) def get_values(self, dtype=None): return self.values - def get_merge_length(self): - return len(self.values) - def diff(self, n): """ return block for the diff of the values """ new_values = com.diff(self.values, n, axis=1) - return [make_block(new_values, self.items, self.ref_items, - ndim=self.ndim, fastpath=True)] + return [make_block(values=new_values, + ndim=self.ndim, fastpath=True, + placement=self.mgr_locs)] def shift(self, periods, axis=0): """ shift the block by periods, possibly upcast """ @@ -983,8 +798,9 @@ def shift(self, periods, axis=0): if f_ordered: new_values = new_values.T - return [make_block(new_values, self.items, self.ref_items, - ndim=self.ndim, fastpath=True)] + return [make_block(new_values, + ndim=self.ndim, fastpath=True, + placement=self.mgr_locs)] def eval(self, func, other, raise_on_error=True, try_cast=False): """ @@ -1003,11 +819,8 @@ def eval(self, func, other, raise_on_error=True, try_cast=False): """ values = self.values - # see if we can align other if hasattr(other, 'reindex_axis'): - axis 
= getattr(other, '_info_axis_number', 0) - other = other.reindex_axis( - self.items, axis=axis, copy=False).values + other = other.values # make sure that we can broadcast is_transposed = False @@ -1078,8 +891,8 @@ def handle_error(): if try_cast: result = self._try_cast_result(result) - return [make_block(result, self.items, self.ref_items, ndim=self.ndim, - fastpath=True)] + return [make_block(result, ndim=self.ndim, + fastpath=True, placement=self.mgr_locs)] def where(self, other, cond, align=True, raise_on_error=True, try_cast=False): @@ -1103,12 +916,7 @@ def where(self, other, cond, align=True, raise_on_error=True, # see if we can align other if hasattr(other, 'reindex_axis'): - if align: - axis = getattr(other, '_info_axis_number', 0) - other = other.reindex_axis(self.items, axis=axis, - copy=True).values - else: - other = other.values + other = other.values # make sure that we can broadcast is_transposed = False @@ -1129,10 +937,7 @@ def where(self, other, cond, align=True, raise_on_error=True, raise ValueError( "where must have a condition that is ndarray like") - if align and hasattr(cond, 'reindex_axis'): - axis = getattr(cond, '_info_axis_number', 0) - cond = cond.reindex_axis(self.items, axis=axis, copy=True).values - else: + if hasattr(cond, 'reindex_axis'): cond = cond.values # may need to undo transpose of values @@ -1177,8 +982,8 @@ def func(c, v, o): if try_cast: result = self._try_cast_result(result) - return make_block(result, self.items, self.ref_items, - ndim=self.ndim) + return make_block(result, + ndim=self.ndim, placement=self.mgr_locs) # might need to separate out blocks axis = cond.ndim - 1 @@ -1189,11 +994,10 @@ def func(c, v, o): result_blocks = [] for m in [mask, ~mask]: if m.any(): - items = self.items[m] - slices = [slice(None)] * cond.ndim - slices[axis] = self.items.get_indexer(items) - r = self._try_cast_result(result[slices]) - result_blocks.append(make_block(r.T, items, self.ref_items)) + r = self._try_cast_result( + 
result.take(m.nonzero()[0], axis=axis)) + result_blocks.append(make_block(r.T, + placement=self.mgr_locs[m])) return result_blocks @@ -1203,11 +1007,13 @@ def equals(self, other): class NumericBlock(Block): + __slots__ = () is_numeric = True _can_hold_na = True class FloatOrComplexBlock(NumericBlock): + __slots__ = () def equals(self, other): if self.dtype != other.dtype or self.shape != other.shape: return False @@ -1215,6 +1021,7 @@ def equals(self, other): return ((left == right) | (np.isnan(left) & np.isnan(right))).all() class FloatBlock(FloatOrComplexBlock): + __slots__ = () is_float = True _downcast_dtype = 'int64' @@ -1255,6 +1062,7 @@ def should_store(self, value): class ComplexBlock(FloatOrComplexBlock): + __slots__ = () is_complex = True def _can_hold_element(self, element): @@ -1275,6 +1083,7 @@ def should_store(self, value): class IntBlock(NumericBlock): + __slots__ = () is_integer = True _can_hold_na = False @@ -1295,6 +1104,7 @@ def should_store(self, value): class TimeDeltaBlock(IntBlock): + __slots__ = () is_timedelta = True _can_hold_na = True is_numeric = False @@ -1379,6 +1189,7 @@ def to_native_types(self, slicer=None, na_rep=None, **kwargs): class BoolBlock(NumericBlock): + __slots__ = () is_bool = True _can_hold_na = False @@ -1406,16 +1217,18 @@ def replace(self, to_replace, value, inplace=False, filter=None, inplace=inplace, filter=filter, regex=regex) + class ObjectBlock(Block): + __slots__ = () is_object = True _can_hold_na = True - def __init__(self, values, items, ref_items, ndim=2, fastpath=False, + def __init__(self, values, ndim=2, fastpath=False, placement=None): if issubclass(values.dtype.type, compat.string_types): values = np.array(values, dtype=object) - super(ObjectBlock, self).__init__(values, items, ref_items, ndim=ndim, + super(ObjectBlock, self).__init__(values, ndim=ndim, fastpath=fastpath, placement=placement) @@ -1436,11 +1249,10 @@ def convert(self, convert_dates=True, convert_numeric=True, convert_timedeltas=T """ # 
attempt to create new type blocks - is_unique = self.items.is_unique blocks = [] if by_item and not self._is_single_block: - for i, c in enumerate(self.items): + for i, rl in enumerate(self.mgr_locs): values = self.iget(i) values = com._possibly_convert_objects( @@ -1449,10 +1261,8 @@ def convert(self, convert_dates=True, convert_numeric=True, convert_timedeltas=T convert_timedeltas=convert_timedeltas, ).reshape(values.shape) values = _block_shape(values, ndim=self.ndim) - items = self.items.take([i]) - placement = None if is_unique else [i] - newb = make_block(values, items, self.ref_items, - ndim=self.ndim, placement=placement) + newb = make_block(values, + ndim=self.ndim, placement=[rl]) blocks.append(newb) else: @@ -1461,12 +1271,12 @@ def convert(self, convert_dates=True, convert_numeric=True, convert_timedeltas=T self.values.ravel(), convert_dates=convert_dates, convert_numeric=convert_numeric ).reshape(self.values.shape) - blocks.append(make_block(values, self.items, self.ref_items, - ndim=self.ndim)) + blocks.append(make_block(values, + ndim=self.ndim, placement=self.mgr_locs)) return blocks - def set(self, item, value, check=False): + def set(self, locs, values, check=False): """ Modify Block in-place with new item value @@ -1475,26 +1285,24 @@ def set(self, item, value, check=False): None """ - loc = self.items.get_loc(item) - # GH6026 if check: try: - if (self.values[loc] == value).all(): + if (self.values[locs] == values).all(): return except: pass try: - self.values[loc] = value + self.values[locs] = values except (ValueError): # broadcasting error # see GH6171 - new_shape = list(value.shape) + new_shape = list(values.shape) new_shape[0] = len(self.items) self.values = np.empty(tuple(new_shape),dtype=self.dtype) self.values.fill(np.nan) - self.values[loc] = value + self.values[locs] = values def _maybe_downcast(self, blocks, downcast=None): @@ -1613,27 +1421,29 @@ def re_replacer(s): f = np.vectorize(re_replacer, otypes=[self.dtype]) - try: - filt = 
lmap(self.items.get_loc, filter) - except TypeError: + if filter is None: filt = slice(None) + else: + filt = self.mgr_locs.isin(filter).nonzero()[0] new_values[filt] = f(new_values[filt]) - return [self if inplace else make_block(new_values, self.items, - self.ref_items, fastpath=True)] + return [self if inplace else + make_block(new_values, + fastpath=True, placement=self.mgr_locs)] class DatetimeBlock(Block): + __slots__ = () is_datetime = True _can_hold_na = True - def __init__(self, values, items, ref_items, fastpath=False, - placement=None, **kwargs): + def __init__(self, values, placement, + fastpath=False, **kwargs): if values.dtype != _NS_DTYPE: values = tslib.cast_to_nanoseconds(values) - super(DatetimeBlock, self).__init__(values, items, ref_items, + super(DatetimeBlock, self).__init__(values, fastpath=True, placement=placement, **kwargs) @@ -1705,7 +1515,8 @@ def fillna(self, value, limit=None, np.putmask(values, mask, value) return [self if inplace else - make_block(values, self.items, self.ref_items, fastpath=True)] + make_block(values, + fastpath=True, placement=self.mgr_locs)] def to_native_types(self, slicer=None, na_rep=None, date_format=None, **kwargs): @@ -1745,7 +1556,7 @@ def astype(self, dtype, copy=False, raise_on_error=True): return self._astype(dtype, copy=copy, raise_on_error=raise_on_error, klass=klass) - def set(self, item, value, check=False): + def set(self, locs, values, check=False): """ Modify Block in-place with new item value @@ -1753,12 +1564,11 @@ def set(self, item, value, check=False): ------- None """ - loc = self.items.get_loc(item) - - if value.dtype != _NS_DTYPE: - value = tslib.cast_to_nanoseconds(value) + if values.dtype != _NS_DTYPE: + # Workaround for numpy 1.6 bug + values = tslib.cast_to_nanoseconds(values) - self.values[loc] = value + self.values[locs] = values def get_values(self, dtype=None): # return object dtype as Timestamps @@ -1769,9 +1579,8 @@ def get_values(self, dtype=None): class SparseBlock(Block): - 
""" implement as a list of sparse arrays of the same dtype """ - __slots__ = ['items', 'ref_items', '_ref_locs', 'ndim', 'values'] + __slots__ = () is_sparse = True is_numeric = True _can_hold_na = True @@ -1779,34 +1588,27 @@ class SparseBlock(Block): _verify_integrity = False _ftype = 'sparse' - def __init__(self, values, items, ref_items, ndim=None, fastpath=False, - placement=None): + def __init__(self, values, placement, + ndim=None, fastpath=False,): # kludgetastic - if ndim is not None: - if ndim == 1: - ndim = 1 - elif ndim > 2: - ndim = ndim - else: - if len(items) != 1: + if ndim is None: + if len(placement) != 1: ndim = 1 else: ndim = 2 self.ndim = ndim - self._ref_locs = None + self.mgr_locs = placement + + if not isinstance(values, SparseArray): + raise TypeError("values must be SparseArray") + self.values = values - if fastpath: - self.items = items - self.ref_items = ref_items - else: - self.items = _ensure_index(items) - self.ref_items = _ensure_index(ref_items) @property def shape(self): - return (len(self.items), self.sp_index.length) + return (len(self.mgr_locs), self.sp_index.length) @property def itemsize(self): @@ -1814,6 +1616,7 @@ def itemsize(self): @property def fill_value(self): + #return np.nan return self.values.fill_value @fill_value.setter @@ -1832,7 +1635,13 @@ def sp_values(self, v): # reset the sparse values self.values = SparseArray(v, sparse_index=self.sp_index, kind=self.kind, dtype=v.dtype, - fill_value=self.fill_value, copy=False) + fill_value=self.values.fill_value, + copy=False) + + def iget(self, col): + if col != 0: + raise IndexError("SparseBlock only contains one item") + return self.values @property def sp_index(self): @@ -1851,15 +1660,9 @@ def __len__(self): def should_store(self, value): return isinstance(value, SparseArray) - def prepare_for_merge(self, **kwargs): - """ create a dense block """ - return make_block(self.get_values(), self.items, self.ref_items) - - def post_merge(self, items, **kwargs): - return self 
- - def set(self, item, value, check=False): - self.values = value + def set(self, locs, values, check=False): + assert locs.tolist() == [0] + self.values = values def get(self, item): if self.ndim == 1: @@ -1879,33 +1682,52 @@ def get_values(self, dtype=None): values = values.reshape((1,) + values.shape) return values - def get_merge_length(self): - return 1 - - def make_block(self, values, items=None, ref_items=None, sparse_index=None, - kind=None, dtype=None, fill_value=None, copy=False, - fastpath=True): + def copy(self, deep=True): + return self.make_block_same_class(values=self.values, + sparse_index=self.sp_index, + kind=self.kind, copy=deep, + placement=self.mgr_locs) + + def make_block_same_class(self, values, placement, + sparse_index=None, kind=None, dtype=None, + fill_value=None, copy=False, fastpath=True): """ return a new block """ if dtype is None: dtype = self.dtype if fill_value is None: - fill_value = self.fill_value - if items is None: - items = self.items - if ref_items is None: - ref_items = self.ref_items + fill_value = self.values.fill_value + + # if not isinstance(values, SparseArray) and values.ndim != self.ndim: + # raise ValueError("ndim mismatch") + + if values.ndim == 2: + nitems = values.shape[0] + + if nitems == 0: + # kludgy, but SparseBlocks cannot handle slices, where the + # output is 0-item, so let's convert it to a dense block: it + # won't take space since there's 0 items, plus it will preserve + # the dtype. 
+ return make_block(np.empty(values.shape, dtype=dtype), + placement, fastpath=True,) + elif nitems > 1: + raise ValueError("Only 1-item 2d sparse blocks are supported") + else: + values = values.reshape(values.shape[1]) + new_values = SparseArray(values, sparse_index=sparse_index, kind=kind or self.kind, dtype=dtype, fill_value=fill_value, copy=copy) - return make_block(new_values, items, ref_items, ndim=self.ndim, - fastpath=fastpath) + return make_block(new_values, ndim=self.ndim, + fastpath=fastpath, placement=placement) def interpolate(self, method='pad', axis=0, inplace=False, limit=None, fill_value=None, **kwargs): values = com.interpolate_2d( self.values.to_dense(), method, axis, limit, fill_value) - return self.make_block(values, self.items, self.ref_items) + return self.make_block_same_class(values=values, + placement=self.mgr_locs) def fillna(self, value, limit=None, inplace=False, downcast=None): # we may need to upcast our fill to match our dtype @@ -1914,8 +1736,9 @@ def fillna(self, value, limit=None, inplace=False, downcast=None): if issubclass(self.dtype.type, np.floating): value = float(value) values = self.values if inplace else self.values.copy() - return [self.make_block(values.get_values(value), fill_value=value)] - + return [self.make_block_same_class(values=values.get_values(value), + fill_value=value, + placement=self.mgr_locs)] def shift(self, periods, axis=0): """ shift the block by periods """ @@ -1933,15 +1756,7 @@ def shift(self, periods, axis=0): new_values[:periods] = fill_value else: new_values[periods:] = fill_value - return [self.make_block(new_values)] - - def take(self, indexer, ref_items, new_axis, axis=1): - """ going to take our items - along the long dimension""" - if axis < 1: - raise AssertionError('axis must be at least 1, got %d' % axis) - - return [self.make_block(self.values.take(indexer))] + return [self.make_block_same_class(new_values, placement=self.mgr_locs)] def reindex_axis(self, indexer, method=None, axis=1, 
fill_value=None, limit=None, mask_info=None): @@ -1954,53 +1769,9 @@ def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, # taking on the 0th axis always here if fill_value is None: fill_value = self.fill_value - return self.make_block(self.values.take(indexer), items=self.items, - fill_value=fill_value) - - def reindex_items_from(self, new_ref_items, indexer=None, method=None, - fill_value=None, limit=None, copy=True): - """ - Reindex to only those items contained in the input set of items - - E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'], - then the resulting items will be ['b'] - - Returns - ------- - reindexed : Block - """ - - # 1-d always - if indexer is None: - new_ref_items, indexer = self.items.reindex(new_ref_items, - limit=limit) - if indexer is None: - indexer = np.arange(len(self.items)) - - # single block - if self.ndim == 1: - - new_items = new_ref_items - new_values = com.take_1d(self.values.values, indexer) - - else: - - # if we don't overlap at all, then don't include this block - new_items = self.items & new_ref_items - if not len(new_items): - return None - - new_values = self.values.values - - # fill if needed - if method is not None or limit is not None: - if fill_value is None: - fill_value = self.fill_value - new_values = com.interpolate_2d(new_values, method=method, - limit=limit, fill_value=fill_value) - - return self.make_block(new_values, items=new_items, - ref_items=new_ref_items, copy=copy) + return self.make_block_same_class(self.values.take(indexer), + fill_value=fill_value, + placement=self.mgr_locs) def sparse_reindex(self, new_index): """ sparse reindex and return a new block @@ -2008,19 +1779,15 @@ def sparse_reindex(self, new_index): values = self.values values = values.sp_index.to_int_index().reindex( values.sp_values.astype('float64'), values.fill_value, new_index) - return self.make_block(values, sparse_index=new_index) - - def split_block_at(self, item): - if len(self.items) == 1 and 
item == self.items[0]: - return [] - return super(SparseBlock, self).split_block_at(self, item) + return self.make_block_same_class(values, sparse_index=new_index, + placement=self.mgr_locs) def _try_cast_result(self, result, dtype=None): return result -def make_block(values, items, ref_items, klass=None, ndim=None, dtype=None, - fastpath=False, placement=None): +def make_block(values, placement, klass=None, ndim=None, + dtype=None, fastpath=False): if klass is None: dtype = dtype or values.dtype vtype = dtype.type @@ -2066,7 +1833,7 @@ def make_block(values, items, ref_items, klass=None, ndim=None, dtype=None, if klass is None: klass = ObjectBlock - return klass(values, items, ref_items, ndim=ndim, fastpath=fastpath, + return klass(values, ndim=ndim, fastpath=fastpath, placement=placement) @@ -2082,6 +1849,42 @@ class BlockManager(PandasObject): lightweight blocked set of labeled data to be manipulated by the DataFrame public API class + Attributes + ---------- + shape + ndim + axes + values + items + + Methods + ------- + set_axis(axis, new_labels) + copy(deep=True) + + get_dtype_counts + get_ftype_counts + get_dtypes + get_ftypes + + apply(func, axes, block_filter_fn) + + get_bool_data + get_numeric_data + + get_slice(slice_like, axis) + get(label) + iget(loc) + get_scalar(label_tup) + + take(indexer, axis) + reindex_axis(new_labels, axis) + reindex_indexer(new_labels, indexer, axis) + + delete(label) + insert(loc, label, value) + set(label, value) + Parameters ---------- @@ -2091,28 +1894,28 @@ class BlockManager(PandasObject): This is *not* a public API class """ __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', - '_is_consolidated', '_has_sparse', '_ref_locs', '_items_map'] + '_is_consolidated', '_blknos', '_blklocs'] def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True): self.axes = [_ensure_index(ax) for ax in axes] - self.blocks = blocks + self.blocks = tuple(blocks) - ndim = self.ndim for block in blocks: - if not 
block.is_sparse and ndim != block.ndim: - raise AssertionError(('Number of Block dimensions (%d) must ' - 'equal number of axes (%d)') - % (block.ndim, ndim)) + if block.is_sparse: + if len(block.mgr_locs) != 1: + raise AssertionError("Sparse block refers to multiple items") + else: + if self.ndim != block.ndim: + raise AssertionError(('Number of Block dimensions (%d) must ' + 'equal number of axes (%d)') + % (block.ndim, self.ndim)) if do_integrity_check: self._verify_integrity() - self._has_sparse = False self._consolidate_check() - # we have a duplicate items index, setup the block maps - if not self.items.is_unique: - self._set_ref_locs(do_refs=True) + self._rebuild_blknos_and_blklocs() def make_empty(self, axes=None): """ return an empty BlockManager with the items axis of len 0 """ @@ -2136,182 +1939,77 @@ def __nonzero__(self): @property def shape(self): - if getattr(self, '_shape', None) is None: - self._shape = tuple(len(ax) for ax in self.axes) - return self._shape + return tuple(len(ax) for ax in self.axes) @property def ndim(self): - if getattr(self, '_ndim', None) is None: - self._ndim = len(self.axes) - return self._ndim + return len(self.axes) - def _set_axis(self, axis, value, check_axis=True): - cur_axis = self.axes[axis] - value = _ensure_index(value) + def set_axis(self, axis, new_labels): + new_labels = _ensure_index(new_labels) + old_len = len(self.axes[axis]) + new_len = len(new_labels) - if check_axis and len(value) != len(cur_axis): + if new_len != old_len: raise ValueError('Length mismatch: Expected axis has %d elements, ' - 'new values have %d elements' % (len(cur_axis), - len(value))) - - self.axes[axis] = value - self._shape = None - return cur_axis, value - - def set_axis(self, axis, value, maybe_rename=True, check_axis=True): - cur_axis, value = self._set_axis(axis, value, check_axis) - - if axis == 0: - - # set/reset ref_locs based on the current index - # and map the new index if needed - self._set_ref_locs(labels=cur_axis) - - # 
take via ref_locs - for block in self.blocks: - block.set_ref_items(self.items, maybe_rename=maybe_rename) - - # set/reset ref_locs based on the new index - self._set_ref_locs(labels=value, do_refs=True) - - def _reset_ref_locs(self): - """ take the current _ref_locs and reset ref_locs on the blocks - to correctly map, ignoring Nones; - reset both _items_map and _ref_locs """ - - # let's reset the ref_locs in individual blocks - if self.items.is_unique: - for b in self.blocks: - b._ref_locs = None - else: - for b in self.blocks: - b.reset_ref_locs() - self._rebuild_ref_locs() + 'new values have %d elements' % (old_len, new_len)) - self._ref_locs = None - self._items_map = None + self.axes[axis] = new_labels - def _rebuild_ref_locs(self): - """Take _ref_locs and set the individual block ref_locs, skipping Nones - no effect on a unique index + def rename_axis(self, mapper, axis, copy=True): """ - if getattr(self, '_ref_locs', None) is not None: - item_count = 0 - for v in self._ref_locs: - if v is not None: - block, item_loc = v - if block._ref_locs is None: - block.reset_ref_locs() - block._ref_locs[item_loc] = item_count - item_count += 1 - - def _set_ref_locs(self, labels=None, do_refs=False): - """ - if we have a non-unique index on this axis, set the indexers - we need to set an absolute indexer for the blocks - return the indexer if we are not unique + Rename one of axes. 
- labels : the (new) labels for this manager - ref : boolean, whether to set the labels (one a 1-1 mapping) + Parameters + ---------- + mapper : unary callable + axis : int + copy : boolean, default True """ + obj = self.copy(deep=copy) + obj.set_axis(axis, _transform_index(self.axes[axis], mapper)) + return obj - if labels is None: - labels = self.items - - # we are unique, and coming from a unique - is_unique = labels.is_unique - if is_unique and not do_refs: - - if not self.items.is_unique: - - # reset our ref locs - self._ref_locs = None - for b in self.blocks: - b._ref_locs = None + def add_prefix(self, prefix): + f = (str(prefix) + '%s').__mod__ + return self.rename_axis(f, axis=0) - return None + def add_suffix(self, suffix): + f = ('%s' + str(suffix)).__mod__ + return self.rename_axis(f, axis=0) - # we are going to a non-unique index - # we have ref_locs on the block at this point - if (not is_unique and do_refs) or do_refs == 'force': + @property + def _is_single_block(self): + if self.ndim == 1: + return True - # create the items map - im = getattr(self, '_items_map', None) - if im is None: + if len(self.blocks) != 1: + return False - im = dict() - for block in self.blocks: + blk = self.blocks[0] + return (blk.mgr_locs.is_slice_like and + blk.mgr_locs.as_slice == slice(0, len(self), 1)) - # if we have a duplicate index but - # _ref_locs have not been set - try: - rl = block.ref_locs - except: - raise AssertionError( - 'Cannot create BlockManager._ref_locs because ' - 'block [%s] with duplicate items [%s] does not ' - 'have _ref_locs set' % (block, labels)) - - m = maybe_create_block_in_items_map(im, block) - for i, item in enumerate(block.items): - m[i] = rl[i] - - self._items_map = im - - # create the _ref_loc map here - rl = [None] * len(labels) - for block, items in im.items(): - for i, loc in enumerate(items): - rl[loc] = (block, i) - self._ref_locs = rl - return rl - - elif do_refs: - self._reset_ref_locs() - - # return our cached _ref_locs (or will 
compute again - # when we recreate the block manager if needed - return getattr(self, '_ref_locs', None) - - def get_items_map(self, use_cached=True): + def _rebuild_blknos_and_blklocs(self): """ - return an inverted ref_loc map for an item index - block -> item (in that block) location -> column location - - use_cached : boolean, use the cached items map, or recreate + Update mgr._blknos / mgr._blklocs. """ + new_blknos = np.empty(self.shape[0], dtype=np.int64) + new_blklocs = np.empty(self.shape[0], dtype=np.int64) + new_blknos.fill(-1) + new_blklocs.fill(-1) - # cache check - if use_cached: - im = getattr(self, '_items_map', None) - if im is not None: - return im + for blkno, blk in enumerate(self.blocks): + rl = blk.mgr_locs + new_blknos[rl.indexer] = blkno + new_blklocs[rl.indexer] = np.arange(len(rl)) - im = dict() - rl = self._set_ref_locs() + if (new_blknos == -1).any(): + raise AssertionError("Gaps in blk ref_locs") - # we have a non-duplicative index - if rl is None: - - axis = self.axes[0] - for block in self.blocks: - - m = maybe_create_block_in_items_map(im, block) - for i, item in enumerate(block.items): - m[i] = axis.get_loc(item) - - # use the ref_locs to construct the map - else: - - for i, (block, idx) in enumerate(rl): - - m = maybe_create_block_in_items_map(im, block) - m[idx] = i - - self._items_map = im - return im + self._blknos = new_blknos + self._blklocs = new_blklocs # make items read only for now def _get_items(self): @@ -2327,23 +2025,6 @@ def _get_counts(self, f): counts[v] = counts.get(v, 0) + b.shape[0] return counts - def _get_types(self, f): - """ return a list of the f per item """ - self._consolidate_inplace() - - # unique - if self.items.is_unique: - l = [ None ] * len(self.items) - for b in self.blocks: - v = f(b) - for rl in b.ref_locs: - l[rl] = v - return l - - # non-unique - ref_locs = self._set_ref_locs() - return [ f(ref_locs[i][0]) for i, item in enumerate(self.items) ] - def get_dtype_counts(self): return 
self._get_counts(lambda b: b.dtype.name) @@ -2351,14 +2032,16 @@ def get_ftype_counts(self): return self._get_counts(lambda b: b.ftype) def get_dtypes(self): - return self._get_types(lambda b: b.dtype) + dtypes = np.array([blk.dtype for blk in self.blocks]) + return dtypes.take(self._blknos) def get_ftypes(self): - return self._get_types(lambda b: b.ftype) + ftypes = np.array([blk.ftype for blk in self.blocks]) + return ftypes.take(self._blknos) def __getstate__(self): block_values = [b.values for b in self.blocks] - block_items = [b.items for b in self.blocks] + block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] axes_array = [ax for ax in self.axes] return axes_array, block_values, block_items @@ -2376,16 +2059,17 @@ def __setstate__(self, state): if values.dtype == 'M8[us]': values = values.astype('M8[ns]') - blk = make_block(values, items, self.axes[0]) + blk = make_block(values, + placement=self.axes[0].get_indexer(items)) blocks.append(blk) - self.blocks = blocks + self.blocks = tuple(blocks) self._post_setstate() def _post_setstate(self): self._is_consolidated = False self._known_consolidated = False - self._set_has_sparse() + self._rebuild_blknos_and_blklocs() def __len__(self): return len(self.items) @@ -2394,24 +2078,20 @@ def __unicode__(self): output = com.pprint_thing(self.__class__.__name__) for i, ax in enumerate(self.axes): if i == 0: - output += '\nItems: %s' % ax + output += u('\nItems: %s') % ax else: - output += '\nAxis %d: %s' % (i, ax) + output += u('\nAxis %d: %s') % (i, ax) for block in self.blocks: - output += '\n%s' % com.pprint_thing(block) + output += u('\n%s') % com.pprint_thing(block) return output def _verify_integrity(self): mgr_shape = self.shape - tot_items = sum(len(x.items) for x in self.blocks) + tot_items = sum(len(x.mgr_locs) for x in self.blocks) for block in self.blocks: - if block.ref_items is not self.items: - raise AssertionError("Block ref_items must be BlockManager " - "items") - if not block.is_sparse 
and block.values.shape[1:] != mgr_shape[1:]: - construction_error( - tot_items, block.values.shape[1:], self.axes) + if not block.is_sparse and block.shape[1:] != mgr_shape[1:]: + construction_error(tot_items, block.shape[1:], self.axes) if len(self.items) != tot_items: raise AssertionError('Number of manager items must equal union of ' 'block items\n# manager items: {0}, # ' @@ -2437,18 +2117,57 @@ def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs): """ result_blocks = [] - for blk in self.blocks: + + # filter kwarg is used in replace-* family of methods + if filter is not None: + filter_locs = set(self.items.get_indexer_for(filter)) + if len(filter_locs) == len(self.items): + # All items are included, as if there were no filtering + filter = None + else: + kwargs['filter'] = filter_locs + + if f == 'where' and kwargs.get('align', True): + align_copy = True + align_keys = ['other', 'cond'] + elif f == 'putmask' and kwargs.get('align', True): + align_copy = False + align_keys = ['new', 'mask'] + elif f == 'eval': + align_copy = False + align_keys = ['other'] + elif f == 'fillna': + # fillna internally does putmask, maybe it's better to do this + # at mgr, not block level? 
+ align_copy = False + align_keys = ['value'] + else: + align_keys = [] + + aligned_args = dict((k, kwargs[k]) for k in align_keys + if hasattr(kwargs[k], 'reindex_axis')) + + for b in self.blocks: if filter is not None: - kwargs['filter'] = set(filter) - if not blk.items.isin(filter).any(): - result_blocks.append(blk) + if not b.mgr_locs.isin(filter_locs).any(): + result_blocks.append(b) continue - applied = getattr(blk, f)(**kwargs) + + if aligned_args: + b_items = self.items[b.mgr_locs.indexer] + + for k, obj in aligned_args.items(): + axis = getattr(obj, '_info_axis_number', 0) + kwargs[k] = obj.reindex_axis(b_items, axis=axis, + copy=align_copy) + + applied = getattr(b, f)(**kwargs) if isinstance(applied, list): result_blocks.extend(applied) else: result_blocks.append(applied) + if len(result_blocks) == 0: return self.make_empty(axes or self.axes) bm = self.__class__(result_blocks, axes or self.axes, @@ -2527,7 +2246,7 @@ def comp(s): else: # get our mask for this element, sized to this # particular block - m = masks[i][b.ref_locs] + m = masks[i][b.mgr_locs.indexer] if m.any(): new_rb.extend(b.putmask(m, d, inplace=True)) else: @@ -2539,31 +2258,6 @@ def comp(s): bm._consolidate_inplace() return bm - def prepare_for_merge(self, **kwargs): - """ prepare for merging, return a new block manager with - Sparse -> Dense - """ - self._consolidate_inplace() - if self._has_sparse: - return self.apply('prepare_for_merge', **kwargs) - return self - - def post_merge(self, objs, **kwargs): - """ try to sparsify items that were previously sparse """ - is_sparse = defaultdict(list) - for o in objs: - for blk in o._data.blocks: - if blk.is_sparse: - - # record the dtype of each item - for i in blk.items: - is_sparse[i].append(blk.dtype) - - if len(is_sparse): - return self.apply('post_merge', items=is_sparse) - - return self - def is_consolidated(self): """ Return True if more than one block with the same dtype @@ -2576,10 +2270,6 @@ def _consolidate_check(self): ftypes = 
[blk.ftype for blk in self.blocks] self._is_consolidated = len(ftypes) == len(set(ftypes)) self._known_consolidated = True - self._set_has_sparse() - - def _set_has_sparse(self): - self._has_sparse = any((blk.is_sparse for blk in self.blocks)) @property def is_mixed_type(self): @@ -2599,163 +2289,66 @@ def is_datelike_mixed_type(self): self._consolidate_inplace() return any([block.is_datelike for block in self.blocks]) - def get_block_map(self, copy=False, typ=None, columns=None, - is_numeric=False, is_bool=False): - """ return a dictionary mapping the ftype -> block list - - Parameters - ---------- - typ : return a list/dict - copy : copy if indicated - columns : a column filter list - filter if the type is indicated """ - - # short circuit - mainly for merging - if (typ == 'dict' and columns is None and not is_numeric and - not is_bool and not copy): - bm = defaultdict(list) - for b in self.blocks: - bm[str(b.ftype)].append(b) - return bm - + def get_bool_data(self, copy=False): + """ + Parameters + ---------- + copy : boolean, default False + Whether to copy the blocks + """ self._consolidate_inplace() + return self.combine([b for b in self.blocks if b.is_bool], copy) - if is_numeric: - filter_blocks = lambda block: block.is_numeric - elif is_bool: - filter_blocks = lambda block: block.is_bool - else: - filter_blocks = lambda block: True - - def filter_columns(b): - if columns: - if not columns in b.items: - return None - b = b.reindex_items_from(columns) - return b - - maybe_copy = lambda b: b.copy() if copy else b - - def maybe_copy(b): - if copy: - b = b.copy() - return b - - if typ == 'list': - bm = [] - for b in self.blocks: - if filter_blocks(b): - b = filter_columns(b) - if b is not None: - bm.append(maybe_copy(b)) - - else: - if typ == 'dtype': - key = lambda b: b.dtype - else: - key = lambda b: b.ftype - bm = defaultdict(list) - for b in self.blocks: - if filter_blocks(b): - b = filter_columns(b) - if b is not None: - 
bm[str(key(b))].append(maybe_copy(b)) - return bm - - def get_bool_data(self, **kwargs): - kwargs['is_bool'] = True - return self.get_data(**kwargs) - - def get_numeric_data(self, **kwargs): - kwargs['is_numeric'] = True - return self.get_data(**kwargs) - - def get_data(self, copy=False, columns=None, **kwargs): + def get_numeric_data(self, copy=False): """ Parameters ---------- copy : boolean, default False Whether to copy the blocks """ - blocks = self.get_block_map( - typ='list', copy=copy, columns=columns, **kwargs) - if len(blocks) == 0: - return self.make_empty() - - return self.combine(blocks, copy=copy) + self._consolidate_inplace() + return self.combine([b for b in self.blocks if b.is_numeric], copy) def combine(self, blocks, copy=True): """ return a new manager with the blocks """ - indexer = np.sort(np.concatenate([b.ref_locs for b in blocks])) + if len(blocks) == 0: + return self.make_empty() + + # FIXME: optimization potential + indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks])) + inv_indexer = _invert_reordering(indexer) new_items = self.items.take(indexer) new_blocks = [] for b in blocks: - b = b.reindex_items_from(new_items, copy=copy) - new_blocks.extend(_valid_blocks(b)) + b = b.copy(deep=copy) + b.mgr_locs = inv_indexer.take(b.mgr_locs.as_array) + new_blocks.append(b) + new_axes = list(self.axes) new_axes[0] = new_items return self.__class__(new_blocks, new_axes, do_integrity_check=False) def get_slice(self, slobj, axis=0): - new_axes = list(self.axes) - - new_axes[axis] = new_axes[axis][slobj] + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") if axis == 0: - new_items = new_axes[0] - - # we want to preserver the view of a single-block - if len(self.blocks) == 1: - - blk = self.blocks[0] - ref_locs = blk.take_ref_locs(slobj) - newb = make_block(blk._slice(slobj), new_items, new_items, - klass=blk.__class__, fastpath=True, - placement=ref_locs) - - new_blocks = [newb] - else: - return 
self.reindex_items( - new_items, indexer=np.arange(len(self.items))[slobj]) + new_blocks = self._slice_take_blocks_ax0(slobj) else: - new_blocks = self._slice_blocks(slobj, axis) + slicer = [slice(None)] * (axis + 1) + slicer[axis] = slobj + slicer = tuple(slicer) + new_blocks = [blk.getitem_block(slicer) for blk in self.blocks] - bm = self.__class__(new_blocks, new_axes, do_integrity_check=False) + new_axes = list(self.axes) + new_axes[axis] = new_axes[axis][slobj] + + bm = self.__class__(new_blocks, new_axes, do_integrity_check=False, + fastpath=True) bm._consolidate_inplace() return bm - def _slice_blocks(self, slobj, axis): - """ - slice the blocks using the provided slice object - this is only for slicing on axis != 0 - """ - - if axis == 0: - raise AssertionError("cannot _slice_blocks on axis=0") - - slicer = [slice(None, None) for _ in range(self.ndim)] - slicer[axis] = slobj - slicer = tuple(slicer) - is_unique = self.axes[0].is_unique - - def place(block): - if not is_unique: - return block._ref_locs - return None - - return [ make_block(block._slice(slicer), - block.items, - block.ref_items, - klass=block.__class__, - fastpath=True, - placement=place(block) - ) for block in self.blocks ] - - def get_series_dict(self): - # For DataFrame - return _blocks_to_series_dict(self.blocks, self.axes[1]) - def __contains__(self, item): return item in self.items @@ -2781,55 +2374,49 @@ def copy(self, deep=True): else: new_axes = list(self.axes) return self.apply('copy', axes=new_axes, deep=deep, - ref_items=new_axes[0], do_integrity_check=False) + do_integrity_check=False) def as_matrix(self, items=None): if len(self.blocks) == 0: - mat = np.empty(self.shape, dtype=float) - elif len(self.blocks) == 1: - blk = self.blocks[0] - if items is None or blk.items.equals(items): - # if not, then just call interleave per below - mat = blk.get_values() - else: - mat = self.reindex_items(items).as_matrix() + return np.empty(self.shape, dtype=float) + + if items is not None: + 
mgr = self.reindex_axis(items, axis=0) else: - if items is None: - mat = self._interleave(self.items) - else: - mat = self.reindex_items(items).as_matrix() + mgr = self - return mat + if self._is_single_block: + return mgr.blocks[0].get_values() + else: + return mgr._interleave() - def _interleave(self, items): + def _interleave(self): """ Return ndarray from blocks with specified item order Items must be contained in the blocks """ dtype = _interleaved_dtype(self.blocks) - items = _ensure_index(items) result = np.empty(self.shape, dtype=dtype) - itemmask = np.zeros(len(items), dtype=bool) - # By construction, all of the item should be covered by one of the - # blocks - if items.is_unique: + if result.shape[0] == 0: + # Workaround for numpy 1.7 bug: + # + # >>> a = np.empty((0,10)) + # >>> a[slice(0,0)] + # array([], shape=(0, 10), dtype=float64) + # >>> a[[]] + # Traceback (most recent call last): + # File "<stdin>", line 1, in <module> + # IndexError: index 0 is out of bounds for axis 0 with size 0 + return result - for block in self.blocks: - indexer = items.get_indexer(block.items) - if (indexer == -1).any(): - raise AssertionError('Items must contain all block items') - result[indexer] = block.get_values(dtype) - itemmask[indexer] = 1 + itemmask = np.zeros(self.shape[0]) - else: - - # non-unique, must use ref_locs - rl = self._set_ref_locs() - for i, (block, idx) in enumerate(rl): - result[i] = block.get_values(dtype)[idx] - itemmask[i] = 1 + for blk in self.blocks: + rl = blk.mgr_locs + result[rl.indexer] = blk.get_values(dtype) + itemmask[rl.indexer] = 1 if not itemmask.all(): raise AssertionError('Some items were not contained in blocks') @@ -2863,22 +2450,17 @@ def xs(self, key, axis=1, copy=True, takeable=False): if len(self.blocks) > 1: # we must copy here as we are mixed type for blk in self.blocks: - newb = make_block(blk.values[slicer], - blk.items, - blk.ref_items, - klass=blk.__class__, - fastpath=True) + newb = make_block(values=blk.values[slicer], 
+ klass=blk.__class__, fastpath=True, + placement=blk.mgr_locs) new_blocks.append(newb) elif len(self.blocks) == 1: block = self.blocks[0] vals = block.values[slicer] if copy: vals = vals.copy() - new_blocks = [make_block(vals, - self.items, - self.items, - klass=block.__class__, - fastpath=True)] + new_blocks = [make_block(values=vals, placement=block.mgr_locs, + klass=block.__class__, fastpath=True,)] return self.__class__(new_blocks, new_axes) @@ -2897,7 +2479,7 @@ def fast_xs(self, loc): # non-unique (GH4726) if not items.is_unique: - result = self._interleave(items) + result = self._interleave() if self.ndim == 2: result = result.T return result[loc] @@ -2907,9 +2489,10 @@ def fast_xs(self, loc): n = len(items) result = np.empty(n, dtype=dtype) for blk in self.blocks: - for j, item in enumerate(blk.items): - i = items.get_loc(item) - result[i] = blk._try_coerce_result(blk.iget((j, loc))) + # Such assignment may incorrectly coerce NaT to None + # result[blk.mgr_locs] = blk._slice((slice(None), loc)) + for i, rl in enumerate(blk.mgr_locs): + result[rl] = blk._try_coerce_result(blk.iget((i, loc))) return result @@ -2930,112 +2513,92 @@ def consolidate(self): def _consolidate_inplace(self): if not self.is_consolidated(): - self.blocks = _consolidate(self.blocks, self.items) - - # reset our mappings - if not self.items.is_unique: - self._ref_locs = None - self._items_map = None - self._set_ref_locs(do_refs=True) + self.blocks = tuple(_consolidate(self.blocks)) self._is_consolidated = True self._known_consolidated = True - self._set_has_sparse() + self._rebuild_blknos_and_blklocs() def get(self, item): + """ + Return values for selected item (ndarray or BlockManager). 
+ """ if self.items.is_unique: - if isnull(item): + if not isnull(item): + loc = self.items.get_loc(item) + else: indexer = np.arange(len(self.items))[isnull(self.items)] - return self.get_for_nan_indexer(indexer) - _, block = self._find_block(item) - return block.get(item) + # allow a single nan location indexer + if not np.isscalar(indexer): + if len(indexer) == 1: + loc = indexer.item() + else: + raise ValueError("cannot label index with a null key") + + return self.iget(loc) else: if isnull(item): raise ValueError("cannot label index with a null key") - indexer = self.items.get_loc(item) - ref_locs = np.array(self._set_ref_locs()) - - # duplicate index but only a single result - if com.is_integer(indexer): - - b, loc = ref_locs[indexer] - values = [b.iget(loc)] - index = Index([self.items[indexer]]) - - # we have a multiple result, potentially across blocks - else: - - values = [block.iget(i) for block, i in ref_locs[indexer]] - index = self.items[indexer] - - # create and return a new block manager - axes = [index] + self.axes[1:] - blocks = form_blocks(values, index, axes) - mgr = BlockManager(blocks, axes) - mgr._consolidate_inplace() - return mgr + indexer = self.items.get_indexer_for([item]) + return self.reindex_indexer(new_axis=self.items[indexer], + indexer=indexer, axis=0, allow_dups=True) def iget(self, i): - item = self.items[i] - - # unique - if self.items.is_unique: - if notnull(item): - return self.get(item) - return self.get_for_nan_indexer(i) - - ref_locs = self._set_ref_locs() - b, loc = ref_locs[i] - return b.iget(loc) - - def get_for_nan_indexer(self, indexer): - - # allow a single nan location indexer - if not np.isscalar(indexer): - if len(indexer) == 1: - indexer = indexer.item() - else: - raise ValueError("cannot label index with a null key") - - # take a nan indexer and return the values - ref_locs = self._set_ref_locs(do_refs='force') - b, loc = ref_locs[indexer] - return b.iget(loc) + return 
self.blocks[self._blknos[i]].iget(self._blklocs[i]) def get_scalar(self, tup): """ Retrieve single item """ - item = tup[0] - _, blk = self._find_block(item) + full_loc = list(ax.get_loc(x) + for ax, x in zip(self.axes, tup)) + blk = self.blocks[self._blknos[full_loc[0]]] + full_loc[0] = self._blklocs[full_loc[0]] - # this could obviously be seriously sped up in cython - item_loc = blk.items.get_loc(item), - full_loc = item_loc + tuple(ax.get_loc(x) - for ax, x in zip(self.axes[1:], tup[1:])) - return blk.values[full_loc] + # FIXME: this may return non-upcasted types? + return blk.values[tuple(full_loc)] def delete(self, item): + """ + Delete selected item (items if non-unique) in-place. + """ + indexer = self.items.get_loc(item) - is_unique = self.items.is_unique - loc = self.items.get_loc(item) - - # dupe keys may return mask - loc = _possibly_convert_to_indexer(loc) - self._delete_from_all_blocks(loc, item) + is_deleted = np.zeros(self.shape[0], dtype=np.bool_) + is_deleted[indexer] = True + ref_loc_offset = -is_deleted.cumsum() - # _ref_locs, and _items_map are good here - new_items = self.items.delete(loc) - self.set_items_norename(new_items) + is_blk_deleted = [False] * len(self.blocks) - self._known_consolidated = False - - if not is_unique: - self._consolidate_inplace() + if isinstance(indexer, int): + affected_start = indexer + else: + affected_start = is_deleted.nonzero()[0][0] + + for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]): + blk = self.blocks[blkno] + bml = blk.mgr_locs + blk_del = is_deleted[bml.indexer].nonzero()[0] + + if len(blk_del) == len(bml): + is_blk_deleted[blkno] = True + continue + elif len(blk_del) != 0: + blk.delete(blk_del) + bml = blk.mgr_locs + + blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer]) + + # FIXME: use Index.delete as soon as it uses fastpath=True + self.axes[0] = self.items[~is_deleted] + self.blocks = tuple(b for blkno, b in enumerate(self.blocks) + if not is_blk_deleted[blkno]) + self._shape = 
None + self._rebuild_blknos_and_blklocs() def set(self, item, value, check=False): """ @@ -3043,508 +2606,345 @@ def set(self, item, value, check=False): contained in the current set of items if check, then validate that we are not setting the same data in-place """ - if not isinstance(value, SparseArray): + # FIXME: refactor, clearly separate broadcasting & zip-like assignment + value_is_sparse = isinstance(value, SparseArray) + + if value_is_sparse: + assert self.ndim == 2 + + def value_getitem(placement): + return value + else: if value.ndim == self.ndim - 1: value = value.reshape((1,) + value.shape) + + def value_getitem(placement): + return value + else: + def value_getitem(placement): + return value[placement.indexer] if value.shape[1:] != self.shape[1:]: raise AssertionError('Shape of new values must be compatible ' 'with manager shape') - def _set_item(item, arr): - i, block = self._find_block(item) - if not block.should_store(value): - # delete from block, create and append new block - self._delete_from_block(i, item) - self._add_new_block(item, arr, loc=None) - else: - block.set(item, arr, check=check) - try: - loc = self.items.get_loc(item) - if isinstance(loc, int): - _set_item(self.items[loc], value) + except KeyError: + # This item wasn't present, just insert at end + self.insert(len(self.items), item, value) + return + + if isinstance(loc, int): + loc = [loc] + + blknos = self._blknos[loc] + blklocs = self._blklocs[loc] + + unfit_mgr_locs = [] + unfit_val_locs = [] + removed_blknos = [] + for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks), + group=True): + blk = self.blocks[blkno] + blk_locs = blklocs[val_locs.indexer] + if blk.should_store(value): + blk.set(blk_locs, value_getitem(val_locs), check=check) else: - subset = self.items[loc] - if len(value) != len(subset): - raise AssertionError( - 'Number of items to set did not match') + unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) + unfit_val_locs.append(val_locs) - # we 
are inserting multiple non-unique items as replacements - # we are inserting one by one, so the index can go from unique - # to non-unique during the loop, need to have _ref_locs defined - # at all times - if np.isscalar(item) and (com.is_list_like(loc) or isinstance(loc, slice)): + # If all block items are unfit, schedule the block for removal. + if len(val_locs) == len(blk.mgr_locs): + removed_blknos.append(blkno) + else: + self._blklocs[blk.mgr_locs.indexer] = -1 + blk.delete(blk_locs) + self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) + + if len(removed_blknos): + # Remove blocks & update blknos accordingly + is_deleted = np.zeros(self.nblocks, dtype=np.bool_) + is_deleted[removed_blknos] = True + + new_blknos = np.empty(self.nblocks, dtype=np.int_) + new_blknos.fill(-1) + new_blknos[~is_deleted] = np.arange(self.nblocks - + len(removed_blknos)) + self._blknos = new_blknos.take(self._blknos, axis=0) + self.blocks = tuple(blk for i, blk in enumerate(self.blocks) + if i not in set(removed_blknos)) + + if unfit_val_locs: + unfit_mgr_locs = np.concatenate(unfit_mgr_locs) + unfit_count = len(unfit_mgr_locs) - # first delete from all blocks - self.delete(item) + new_blocks = [] + if value_is_sparse: + # This code (ab-)uses the fact that sparse blocks contain only + # one item. 
+ new_blocks.extend( + make_block(values=value.copy(), ndim=self.ndim, + placement=slice(mgr_loc, mgr_loc + 1)) + for mgr_loc in unfit_mgr_locs) + + self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + + len(self.blocks)) + self._blklocs[unfit_mgr_locs] = 0 - loc = _possibly_convert_to_indexer(loc) - for i, (l, k, arr) in enumerate(zip(loc, subset, value)): + else: + # unfit_val_locs contains BlockPlacement objects + unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) - # insert the item - self.insert( - l, k, arr[None, :], allow_duplicates=True) + new_blocks.append( + make_block(values=value_getitem(unfit_val_items), + ndim=self.ndim, placement=unfit_mgr_locs)) - # reset the _ref_locs on indiviual blocks - # rebuild ref_locs - if self.items.is_unique: - self._reset_ref_locs() - self._set_ref_locs(do_refs='force') + self._blknos[unfit_mgr_locs] = len(self.blocks) + self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) - self._rebuild_ref_locs() + self.blocks += tuple(new_blocks) - else: - for i, (item, arr) in enumerate(zip(subset, value)): - _set_item(item, arr[None, :]) - except KeyError: - # insert at end - self.insert(len(self.items), item, value) - - self._known_consolidated = False + # Newly created block's dtype may already be present. + self._known_consolidated = False def insert(self, loc, item, value, allow_duplicates=False): + """ + Insert item at selected position. + + Parameters + ---------- + loc : int + item : hashable + value : array_like + allow_duplicates: bool + If False, trying to insert non-unique item will raise + """ if not allow_duplicates and item in self.items: # Should this be a different kind of error?? 
raise ValueError('cannot insert %s, already exists' % item) - try: - new_items = self.items.insert(loc, item) - self.set_items_norename(new_items) + if not isinstance(loc, int): + raise TypeError("loc must be int") - # new block - self._add_new_block(item, value, loc=loc) + block = make_block(values=value, + ndim=self.ndim, + placement=slice(loc, loc+1)) - except: - - # so our insertion operation failed, so back out of the new items - # GH 3010 - new_items = self.items.delete(loc) - self.set_items_norename(new_items) - - # re-raise - raise - - if len(self.blocks) > 100: - self._consolidate_inplace() - - self._known_consolidated = False + for blkno, count in _fast_count_smallints(self._blknos[loc:]): + blk = self.blocks[blkno] + if count == len(blk.mgr_locs): + blk.mgr_locs = blk.mgr_locs.add(1) + else: + new_mgr_locs = blk.mgr_locs.as_array.copy() + new_mgr_locs[new_mgr_locs >= loc] += 1 + blk.mgr_locs = new_mgr_locs + + if loc == self._blklocs.shape[0]: + # np.append is a lot faster (at least in numpy 1.7.1), let's use it + # if we can. 
+ self._blklocs = np.append(self._blklocs, 0) + self._blknos = np.append(self._blknos, len(self.blocks)) + else: + self._blklocs = np.insert(self._blklocs, loc, 0) + self._blknos = np.insert(self._blknos, loc, len(self.blocks)) - # clear the internal ref_loc mappings if necessary - if loc != len(self.items) - 1 and new_items.is_unique: - self.set_items_clear(new_items) + self.axes[0] = self.items.insert(loc, item) - def set_items_norename(self, value): - self.set_axis(0, value, maybe_rename=False, check_axis=False) + self.blocks += (block,) self._shape = None - def set_items_clear(self, value): - """ clear the ref_locs on all blocks """ - self.set_axis(0, value, maybe_rename='clear', check_axis=False) - - def _delete_from_all_blocks(self, loc, item): - """ delete from the items loc the item - the item could be in multiple blocks which could - change each iteration (as we split blocks) """ - - # possibily convert to an indexer - loc = _possibly_convert_to_indexer(loc) - - if isinstance(loc, (list, tuple, np.ndarray)): - for l in loc: - for i, b in enumerate(self.blocks): - if item in b.items: - self._delete_from_block(i, item) + self._known_consolidated = False - else: - i, _ = self._find_block(item) - self._delete_from_block(i, item) + if len(self.blocks) > 100: + self._consolidate_inplace() - def _delete_from_block(self, i, item): + def reindex_axis(self, new_index, axis, method=None, limit=None, + fill_value=None, copy=True): """ - Delete and maybe remove the whole block - - Remap the split blocks to there old ranges, - so after this function, _ref_locs and _items_map (if used) - are correct for the items, None fills holes in _ref_locs + Conform block manager to new index. 
""" - block = self.blocks.pop(i) - ref_locs = self._set_ref_locs() - prev_items_map = self._items_map.pop( - block) if ref_locs is not None else None - - # if we can't consolidate, then we are removing this block in its - # entirey - if block._can_consolidate: - - # compute the split mask - loc = block.items.get_loc(item) - if type(loc) == slice or com.is_integer(loc): - mask = np.array([True] * len(block)) - mask[loc] = False - else: # already a mask, inverted - mask = -loc - - # split the block - counter = 0 - for s, e in com.split_ranges(mask): - - sblock = make_block(block.values[s:e], - block.items[s:e].copy(), - block.ref_items, - klass=block.__class__, - fastpath=True) - - self.blocks.append(sblock) - - # update the _ref_locs/_items_map - if ref_locs is not None: - - # fill the item_map out for this sub-block - m = maybe_create_block_in_items_map( - self._items_map, sblock) - for j, itm in enumerate(sblock.items): + new_index = _ensure_index(new_index) + new_index, indexer = self.axes[axis].reindex( + new_index, method=method, limit=limit, copy_if_needed=True) - # is this item masked (e.g. was deleted)? - while (True): + return self.reindex_indexer(new_index, indexer, axis=axis, + fill_value=fill_value, copy=copy) - if counter > len(mask) or mask[counter]: - break - else: - counter += 1 - - # find my mapping location - m[j] = prev_items_map[counter] - counter += 1 - - # set the ref_locs in this block - sblock.set_ref_locs(m) - - # reset the ref_locs to the new structure - if ref_locs is not None: - - # items_map is now good, with the original locations - self._set_ref_locs(do_refs=True) - - # reset the ref_locs based on the now good block._ref_locs - self._reset_ref_locs() - - def _add_new_block(self, item, value, loc=None): - # Do we care about dtype at the moment? - - # hm, elaborate hack? 
- if loc is None: - loc = self.items.get_loc(item) - new_block = make_block(value, self.items[loc:loc + 1].copy(), - self.items, fastpath=True) - self.blocks.append(new_block) - - # set ref_locs based on the this new block - # and add to the ref/items maps - if not self.items.is_unique: - - # insert into the ref_locs at the appropriate location - # _ref_locs is already long enough, - # but may need to shift elements - new_block.set_ref_locs([0]) - - # need to shift elements to the right - if self._ref_locs[loc] is not None: - for i in reversed(lrange(loc + 1, len(self._ref_locs))): - self._ref_locs[i] = self._ref_locs[i - 1] - - self._ref_locs[loc] = (new_block, 0) - - # and reset - self._reset_ref_locs() - self._set_ref_locs(do_refs=True) - - def _find_block(self, item): - self._check_have(item) - for i, block in enumerate(self.blocks): - if item in block: - return i, block - - def _check_have(self, item): - if item not in self.items: - raise KeyError('no item named %s' % com.pprint_thing(item)) - - def reindex_axis(self, new_axis, indexer=None, method=None, axis=0, - fill_value=None, limit=None, copy=True): - new_axis = _ensure_index(new_axis) - cur_axis = self.axes[axis] - - if new_axis.equals(cur_axis): - if copy: - result = self.copy(deep=True) - result.axes[axis] = new_axis - result._shape = None + def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, + allow_dups=False, copy=True): + """ + Parameters + ---------- + new_axis : Index + indexer : ndarray of int64 or None + axis : int + fill_value : object + allow_dups : bool - if axis == 0: - # patch ref_items, #1823 - for blk in result.blocks: - blk.ref_items = new_axis + pandas-indexer with -1's only. 
+ """ - return result - else: + if indexer is None: + if new_axis is self.axes[axis] and not copy: return self - if axis == 0: - if method is not None or limit is not None: - return self.reindex_axis0_with_method( - new_axis, indexer=indexer, method=method, - fill_value=fill_value, limit=limit, copy=copy - ) - return self.reindex_items(new_axis, indexer=indexer, copy=copy, - fill_value=fill_value) - - new_axis, indexer = cur_axis.reindex( - new_axis, method, copy_if_needed=True) - return self.reindex_indexer(new_axis, indexer, axis=axis, - fill_value=fill_value) + result = self.copy(deep=copy) + result.axes = list(self.axes) + result.axes[axis] = new_axis + return result - def reindex_axis0_with_method(self, new_axis, indexer=None, method=None, - fill_value=None, limit=None, copy=True): - raise AssertionError('method argument not supported for ' - 'axis == 0') + self._consolidate_inplace() - def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=None, - allow_dups=False): - """ - pandas-indexer with -1's only. 
- """ # trying to reindex on an axis with duplicates - if not allow_dups and not self.axes[axis].is_unique and len(indexer): + if (not allow_dups and not self.axes[axis].is_unique + and len(indexer)): raise ValueError("cannot reindex from a duplicate axis") - if not self.is_consolidated(): - self = self.consolidate() + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") if axis == 0: - return self._reindex_indexer_items(new_axis, indexer, fill_value) - - new_blocks = [] - for block in self.blocks: - newb = block.reindex_axis( - indexer, axis=axis, fill_value=fill_value) - new_blocks.append(newb) + new_blocks = self._slice_take_blocks_ax0( + indexer, fill_tuple=(fill_value,)) + else: + new_blocks = [blk.take_nd(indexer, axis=axis, + fill_tuple=(fill_value if fill_value is not None else + blk.fill_value,)) + for blk in self.blocks] new_axes = list(self.axes) new_axes[axis] = new_axis return self.__class__(new_blocks, new_axes) - def _reindex_indexer_items(self, new_items, indexer, fill_value): - # TODO: less efficient than I'd like - - item_order = com.take_1d(self.items.values, indexer) - new_axes = [new_items] + self.axes[1:] - new_blocks = [] - is_unique = new_items.is_unique - - # we have duplicates in the items and what we are reindexing - if not is_unique and not self.items.is_unique: - - rl = self._set_ref_locs(do_refs='force') - for i, idx in enumerate(indexer): - item = new_items.take([i]) - if idx >= 0: - blk, lidx = rl[idx] - blk = make_block(_block_shape(blk.iget(lidx)), item, - new_items, ndim=self.ndim, fastpath=True, - placement=[i]) - - # a missing value - else: - blk = self._make_na_block(item, - new_items, - placement=[i], - fill_value=fill_value) - new_blocks.append(blk) - new_blocks = _consolidate(new_blocks, new_items) - - - # keep track of what items aren't found anywhere - else: - l = np.arange(len(item_order)) - mask = np.zeros(len(item_order), dtype=bool) - - for blk in self.blocks: - blk_indexer = 
blk.items.get_indexer(item_order) - selector = blk_indexer != -1 - - # update with observed items - mask |= selector - - if not selector.any(): - continue - - new_block_items = new_items.take(selector.nonzero()[0]) - new_values = com.take_nd(blk.values, blk_indexer[selector], axis=0, - allow_fill=False) - placement = l[selector] if not is_unique else None - new_blocks.append(make_block(new_values, - new_block_items, - new_items, - placement=placement, - fastpath=True)) - - if not mask.all(): - na_items = new_items[-mask] - placement = l[-mask] if not is_unique else None - na_block = self._make_na_block(na_items, - new_items, - placement=placement, - fill_value=fill_value) - new_blocks.append(na_block) - new_blocks = _consolidate(new_blocks, new_items) + def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): + """ + Slice/take blocks along axis=0. - return self.__class__(new_blocks, new_axes) + Overloaded for SingleBlock - def reindex_items(self, new_items, indexer=None, copy=True, - fill_value=None): - """ + Returns + ------- + new_blocks : list of Block """ - new_items = _ensure_index(new_items) - data = self - if not data.is_consolidated(): - data = data.consolidate() - return data.reindex_items(new_items, copy=copy, - fill_value=fill_value) - if indexer is None: - new_items, indexer = self.items.reindex(new_items, - copy_if_needed=True) - new_axes = [new_items] + self.axes[1:] + allow_fill = fill_tuple is not None - # could have so me pathological (MultiIndex) issues here - new_blocks = [] - if indexer is None: - for blk in self.blocks: - if copy: - blk = blk.reindex_items_from(new_items) - else: - blk.ref_items = new_items - new_blocks.extend(_valid_blocks(blk)) - else: + sl_type, slobj, sllen = _preprocess_slice_or_indexer( + slice_or_indexer, self.shape[0], allow_fill=allow_fill) - # unique - if self.axes[0].is_unique and new_items.is_unique: + if self._is_single_block: + blk = self.blocks[0] - # ok to use the global indexer if only 1 block - i 
= indexer if len(self.blocks) == 1 else None + if sl_type in ('slice', 'mask'): + return [blk.getitem_block(slobj, + new_mgr_locs=slice(0, sllen))] + elif not allow_fill or self.ndim == 1: + if allow_fill and fill_tuple[0] is None: + _, fill_value = com._maybe_promote(blk.dtype) + fill_tuple = (fill_value,) + + return [blk.take_nd(slobj, axis=0, + new_mgr_locs=slice(0, sllen), + fill_tuple=fill_tuple)] + + if sl_type in ('slice', 'mask'): + blknos = self._blknos[slobj] + blklocs = self._blklocs[slobj] + else: + blknos = com.take_1d(self._blknos, slobj, fill_value=-1, + allow_fill=allow_fill) + blklocs = com.take_1d(self._blklocs, slobj, fill_value=-1, + allow_fill=allow_fill) + + # When filling blknos, make sure blknos is updated before appending to + # blocks list, that way new blkno is exactly len(blocks). + # + # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order, + # pytables serialization will break otherwise. + blocks = [] + for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks), + group=True): + if blkno == -1: + # If we've got here, fill_tuple was not None. + fill_value = fill_tuple[0] + + blocks.append(self._make_na_block( + placement=mgr_locs, fill_value=fill_value)) + else: + blk = self.blocks[blkno] - for block in self.blocks: - blk = block.reindex_items_from(new_items, indexer=i, copy=copy) - new_blocks.extend(_valid_blocks(blk)) + # Otherwise, slicing along items axis is necessary. + if blk.is_sparse: + # A sparse block, it's easy, because there's only one item + # and each mgr loc is a copy of that single item. 
+ for mgr_loc in mgr_locs: + newblk = blk.copy(deep=True) + newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1) + blocks.append(newblk) - # non-unique - else: - rl = self._set_ref_locs(do_refs='force') - for i, idx in enumerate(indexer): - blk, lidx = rl[idx] - item = new_items.take([i]) - blk = make_block(_block_shape(blk.iget(lidx)), item, - new_items, ndim=self.ndim, fastpath=True, - placement=[i]) - new_blocks.append(blk) - - # add a na block if we are missing items - mask = indexer == -1 - if mask.any(): - extra_items = new_items[mask] - na_block = self._make_na_block(extra_items, new_items, - fill_value=fill_value) - new_blocks.append(na_block) - new_blocks = _consolidate(new_blocks, new_items) - - # consolidate - # import for non-unique which creates a block for each item - # and they must be consolidated before passing on - new_blocks = _consolidate(new_blocks, new_items) + else: + blocks.append(blk.take_nd( + blklocs[mgr_locs.indexer], axis=0, + new_mgr_locs=mgr_locs, fill_tuple=None)) - return self.__class__(new_blocks, new_axes) + return blocks - def _make_na_block(self, items, ref_items, placement=None, - fill_value=None): + def _make_na_block(self, placement, fill_value=None): # TODO: infer dtypes other than float64 from fill_value if fill_value is None: fill_value = np.nan block_shape = list(self.shape) - block_shape[0] = len(items) + block_shape[0] = len(placement) dtype, fill_value = com._infer_dtype_from_scalar(fill_value) block_values = np.empty(block_shape, dtype=dtype) block_values.fill(fill_value) - return make_block(block_values, items, ref_items, placement=placement) - - def take(self, indexer, new_index=None, axis=1, verify=True): - if axis < 1: - raise AssertionError('axis must be at least 1, got %d' % axis) + return make_block(block_values, placement=placement) + def take(self, indexer, axis=1, verify=True, convert=True): + """ + Take items along any axis. 
+ """ self._consolidate_inplace() - if isinstance(indexer, list): - indexer = np.array(indexer) + indexer = np.asanyarray(indexer, dtype=np.int_) - indexer = com._ensure_platform_int(indexer) - n = len(self.axes[axis]) + n = self.shape[axis] + if convert: + indexer = _maybe_convert_indices(indexer, n) if verify: - indexer = _maybe_convert_indices(indexer, n) if ((indexer == -1) | (indexer >= n)).any(): raise Exception('Indices must be nonzero and less than ' 'the axis length') - new_axes = list(self.axes) - if new_index is None: - new_index = self.axes[axis].take(indexer) - - new_axes[axis] = new_index - return self.apply('take', - axes=new_axes, - indexer=indexer, - ref_items=new_axes[0], - new_axis=new_axes[axis], - axis=axis) - - def merge(self, other, lsuffix=None, rsuffix=None): + new_labels = self.axes[axis].take(indexer) + return self.reindex_indexer(new_axis=new_labels, indexer=indexer, + axis=axis, allow_dups=True) + + def merge(self, other, lsuffix='', rsuffix=''): if not self._is_indexed_like(other): raise AssertionError('Must have same axes to merge managers') - this, other = self._maybe_rename_join(other, lsuffix, rsuffix) - - cons_items = this.items + other.items - new_axes = list(this.axes) - new_axes[0] = cons_items - - consolidated = _consolidate(this.blocks + other.blocks, cons_items) - return self.__class__(consolidated, new_axes) + l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, + right=other.items, rsuffix=rsuffix) + new_items = _concat_indexes([l, r]) - def _maybe_rename_join(self, other, lsuffix, rsuffix, copydata=True): - to_rename = self.items.intersection(other.items) - if len(to_rename) > 0: - if not lsuffix and not rsuffix: - raise ValueError('columns overlap but no suffix specified: %s' - % to_rename) + new_blocks = [blk.copy(deep=False) + for blk in self.blocks] - def lrenamer(x): - if x in to_rename: - return '%s%s' % (x, lsuffix) - return x + offset = self.shape[0] + for blk in other.blocks: + blk = 
blk.copy(deep=False) + blk.mgr_locs = blk.mgr_locs.add(offset) + new_blocks.append(blk) - def rrenamer(x): - if x in to_rename: - return '%s%s' % (x, rsuffix) - return x - - this = self.rename_items(lrenamer, copy=copydata) - other = other.rename_items(rrenamer, copy=copydata) - else: - this = self + new_axes = list(self.axes) + new_axes[0] = new_items - return this, other + return self.__class__(_consolidate(new_blocks), new_axes) def _is_indexed_like(self, other): """ @@ -3558,83 +2958,6 @@ def _is_indexed_like(self, other): return False return True - def rename(self, mapper, axis, copy=False): - """ generic rename """ - - if axis == 0: - return self.rename_items(mapper, copy=copy) - return self.rename_axis(mapper, axis=axis) - - def rename_axis(self, mapper, axis=1): - - index = self.axes[axis] - if isinstance(index, MultiIndex): - new_axis = MultiIndex.from_tuples( - [tuple(mapper(y) for y in x) for x in index], - names=index.names) - else: - new_axis = Index([mapper(x) for x in index], name=index.name) - - if not new_axis.is_unique: - raise AssertionError('New axis must be unique to rename') - - new_axes = list(self.axes) - new_axes[axis] = new_axis - return self.__class__(self.blocks, new_axes) - - def rename_items(self, mapper, copy=True): - if isinstance(self.items, MultiIndex): - items = [tuple(mapper(y) for y in x) for x in self.items] - new_items = MultiIndex.from_tuples(items, names=self.items.names) - else: - items = [mapper(x) for x in self.items] - new_items = Index(items, name=self.items.name) - - new_blocks = [] - for block in self.blocks: - newb = block.copy(deep=copy) - newb.set_ref_items(new_items, maybe_rename=True) - new_blocks.append(newb) - new_axes = list(self.axes) - new_axes[0] = new_items - return self.__class__(new_blocks, new_axes) - - def add_prefix(self, prefix): - f = (('%s' % prefix) + '%s').__mod__ - return self.rename_items(f) - - def add_suffix(self, suffix): - f = ('%s' + ('%s' % suffix)).__mod__ - return self.rename_items(f) - 
- @property - def block_id_vector(self): - # TODO - result = np.empty(len(self.items), dtype=int) - result.fill(-1) - - for i, blk in enumerate(self.blocks): - indexer = self.items.get_indexer(blk.items) - if (indexer == -1).any(): - raise AssertionError('Block items must be in manager items') - result.put(indexer, i) - - if (result < 0).any(): - raise AssertionError('Some items were not in any block') - return result - - @property - def item_dtypes(self): - result = np.empty(len(self.items), dtype='O') - mask = np.zeros(len(self.items), dtype=bool) - for i, blk in enumerate(self.blocks): - indexer = self.items.get_indexer(blk.items) - result.put(indexer, blk.dtype.name) - mask.put(indexer, 1) - if not (mask.all()): - raise AssertionError('Some items were not in any block') - return result - def equals(self, other): self_axes, other_axes = self.axes, other.axes if len(self_axes) != len(other_axes): @@ -3646,16 +2969,16 @@ def equals(self, other): return all(block.equals(oblock) for block, oblock in zip(self.blocks, other.blocks)) -class SingleBlockManager(BlockManager): +class SingleBlockManager(BlockManager): """ manage a single block with """ + ndim = 1 _is_consolidated = True _known_consolidated = True - __slots__ = ['axes', 'blocks', '_block', - '_values', '_shape', '_has_sparse'] + __slots__ = () - def __init__(self, block, axis, do_integrity_check=False, fastpath=True): + def __init__(self, block, axis, do_integrity_check=False, fastpath=False): if isinstance(axis, list): if len(axis) != 1: @@ -3675,11 +2998,7 @@ def __init__(self, block, axis, do_integrity_check=False, fastpath=True): raise ValueError('Cannot create SingleBlockManager with ' 'more than 1 block') block = block[0] - if not isinstance(block, Block): - block = make_block(block, axis, axis, ndim=1, fastpath=True) - else: - self.axes = [_ensure_index(axis)] # create the block here @@ -3689,103 +3008,76 @@ def __init__(self, block, axis, do_integrity_check=False, fastpath=True): if len(block) > 1: 
dtype = _interleaved_dtype(block) block = [b.astype(dtype) for b in block] - block = _consolidate(block, axis) + block = _consolidate(block) if len(block) != 1: raise ValueError('Cannot create SingleBlockManager with ' 'more than 1 block') block = block[0] - if not isinstance(block, Block): - block = make_block(block, axis, axis, ndim=1, fastpath=True) + if not isinstance(block, Block): + block = make_block(block, + placement=slice(0, len(axis)), + ndim=1, fastpath=True) self.blocks = [block] - self._block = self.blocks[0] - self._values = self._block.values - self._has_sparse = self._block.is_sparse def _post_setstate(self): - self._block = self.blocks[0] - self._values = self._block.values - - def _get_counts(self, f): - return { f(self._block) : 1 } - - @property - def shape(self): - if getattr(self, '_shape', None) is None: - self._shape = tuple([len(self.axes[0])]) - return self._shape + pass - def apply(self, f, axes=None, do_integrity_check=False, **kwargs): - """ - fast path for SingleBlock Manager + @property + def _block(self): + return self.blocks[0] - ssee also BlockManager.apply - """ - applied = getattr(self._block, f)(**kwargs) - bm = self.__class__(applied, axes or self.axes, - do_integrity_check=do_integrity_check) - bm._consolidate_inplace() - return bm + @property + def _values(self): + return self._block.values def reindex(self, new_axis, indexer=None, method=None, fill_value=None, limit=None, copy=True): # if we are the same and don't copy, just return - if not copy and self.index.equals(new_axis): - return self + if self.index.equals(new_axis): + if copy: + return self.copy(deep=True) + else: + return self - block = self._block.reindex_items_from(new_axis, indexer=indexer, - method=method, - fill_value=fill_value, - limit=limit, copy=copy) - mgr = SingleBlockManager(block, new_axis) - mgr._consolidate_inplace() - return mgr + values = self._block.get_values() - def _reindex_indexer_items(self, new_items, indexer, fill_value): - # equiv to a 
reindex - return self.reindex(new_items, indexer=indexer, fill_value=fill_value, - copy=False) + if indexer is None: + indexer = self.items.get_indexer_for(new_axis) - def reindex_axis0_with_method(self, new_axis, indexer=None, method=None, - fill_value=None, limit=None, copy=True): - return self.reindex(new_axis, indexer=indexer, method=method, - fill_value=fill_value, limit=limit, copy=copy) + if fill_value is None: + # FIXME: is fill_value used correctly in sparse blocks? + if not self._block.is_sparse: + fill_value = self._block.fill_value + else: + fill_value = np.nan - def _delete_from_block(self, i, item): - super(SingleBlockManager, self)._delete_from_block(i, item) + new_values = com.take_1d(values, indexer, + fill_value=fill_value) - # possibly need to merge split blocks - if len(self.blocks) > 1: - new_items = Index(list(itertools.chain(*[ b.items for b in self.blocks ]))) - block = make_block(np.concatenate([ b.values for b in self.blocks ]), - new_items, - new_items, - dtype=self._block.dtype) + # fill if needed + if method is not None or limit is not None: + new_values = com.interpolate_2d(new_values, method=method, + limit=limit, fill_value=fill_value) - elif len(self.blocks): - block = self.blocks[0] - else: - block = make_block(np.array([], dtype=self._block.dtype), [], []) + if self._block.is_sparse: + make_block = self._block.make_block_same_class - self.blocks = [block] - self._block = block - self._values = self._block.values + block = make_block(new_values, copy=copy, + placement=slice(0, len(new_axis))) - def get_slice(self, slobj): - return self.__class__(self._block._slice(slobj), - self.index[slobj], fastpath=True) + mgr = SingleBlockManager(block, new_axis) + mgr._consolidate_inplace() + return mgr - def set_axis(self, axis, value, maybe_rename=True, check_axis=True): - cur_axis, value = self._set_axis(axis, value, check_axis) - self._block.set_ref_items(self.items, maybe_rename=maybe_rename) + def get_slice(self, slobj, axis=0): + if 
axis >= self.ndim: + raise IndexError("Requested axis not found in manager") - def set_ref_items(self, ref_items, maybe_rename=True): - """ we can optimize and our ref_locs are always equal to ref_items """ - if maybe_rename: - self.items = ref_items - self.ref_items = ref_items + return self.__class__(self._block._slice(slobj), + self.index[slobj], fastpath=True) @property def index(self): @@ -3804,6 +3096,18 @@ def dtype(self): def ftype(self): return self._block.ftype + def get_dtype_counts(self): + return {self.dtype.name: 1} + + def get_ftype_counts(self): + return {self.ftype: 1} + + def get_dtypes(self): + return np.array([self._block.dtype]) + + def get_ftypes(self): + return np.array([self._block.ftype]) + @property def values(self): return self._values.view() @@ -3825,6 +3129,16 @@ def _consolidate_check(self): def _consolidate_inplace(self): pass + def delete(self, item): + """ + Delete single item from SingleBlockManager. + + Ensures that self.blocks doesn't become empty. + """ + loc = self.items.get_loc(item) + self._block.delete(loc) + self.axes[0] = self.axes[0].delete(loc) + def fast_xs(self, loc): """ fast path for getting a cross-section @@ -3832,6 +3146,7 @@ def fast_xs(self, loc): """ return self._block.values[loc] + def construction_error(tot_items, block_shape, axes, e=None): """ raise a helpful message about our construction """ passed = tuple(map(int, [tot_items] + list(block_shape))) @@ -3841,14 +3156,15 @@ def construction_error(tot_items, block_shape, axes, e=None): raise ValueError("Shape of passed values is {0}, indices imply {1}".format( passed,implied)) + def create_block_manager_from_blocks(blocks, axes): try: - - # if we are passed values, make the blocks if len(blocks) == 1 and not isinstance(blocks[0], Block): - placement = None if axes[0].is_unique else np.arange(len(axes[0])) - blocks = [ - make_block(blocks[0], axes[0], axes[0], placement=placement)] + # It's OK if a single block is passed as values, its placement is + # 
basically "all items", but if there're many, don't bother + # converting, it's an error anyway. + blocks = [make_block(values=blocks[0], + placement=slice(0, len(axes[0])))] mgr = BlockManager(blocks, axes) mgr._consolidate_inplace() @@ -3870,26 +3186,7 @@ def create_block_manager_from_arrays(arrays, names, axes): construction_error(len(arrays), arrays[0].shape[1:], axes, e) -def maybe_create_block_in_items_map(im, block): - """ create/return the block in an items_map """ - try: - return im[block] - except: - im[block] = l = [None] * len(block.items) - return l - - def form_blocks(arrays, names, axes): - - # pre-filter out items if we passed it - items = axes[0] - - if len(arrays) < len(items): - nn = set(names) - extra_items = Index([i for i in items if i not in nn]) - else: - extra_items = [] - # put "leftover" items in float bucket, where else? # generalize? float_items = [] @@ -3899,8 +3196,23 @@ def form_blocks(arrays, names, axes): object_items = [] sparse_items = [] datetime_items = [] + extra_locs = [] + + names_idx = Index(names) + if names_idx.equals(axes[0]): + names_indexer = np.arange(len(names_idx)) + else: + assert names_idx.intersection(axes[0]).is_unique + names_indexer = names_idx.get_indexer_for(axes[0]) + + for i, name_idx in enumerate(names_indexer): + if name_idx == -1: + extra_locs.append(i) + continue + + k = names[name_idx] + v = arrays[name_idx] - for i, (k, v) in enumerate(zip(names, arrays)): if isinstance(v, (SparseArray, ABCSparseSeries)): sparse_items.append((i, k, v)) elif issubclass(v.dtype.type, np.floating): @@ -3927,72 +3239,67 @@ def form_blocks(arrays, names, axes): else: object_items.append((i, k, v)) - is_unique = items.is_unique blocks = [] if len(float_items): - float_blocks = _multi_blockify(float_items, items, is_unique=is_unique) + float_blocks = _multi_blockify(float_items) blocks.extend(float_blocks) if len(complex_items): complex_blocks = _simple_blockify( - complex_items, items, np.complex128, is_unique=is_unique) + 
complex_items, np.complex128) blocks.extend(complex_blocks) if len(int_items): - int_blocks = _multi_blockify(int_items, items, is_unique=is_unique) + int_blocks = _multi_blockify(int_items) blocks.extend(int_blocks) if len(datetime_items): datetime_blocks = _simple_blockify( - datetime_items, items, _NS_DTYPE, is_unique=is_unique) + datetime_items, _NS_DTYPE) blocks.extend(datetime_blocks) if len(bool_items): bool_blocks = _simple_blockify( - bool_items, items, np.bool_, is_unique=is_unique) + bool_items, np.bool_) blocks.extend(bool_blocks) if len(object_items) > 0: object_blocks = _simple_blockify( - object_items, items, np.object_, is_unique=is_unique) + object_items, np.object_) blocks.extend(object_blocks) if len(sparse_items) > 0: - sparse_blocks = _sparse_blockify(sparse_items, items) + sparse_blocks = _sparse_blockify(sparse_items) blocks.extend(sparse_blocks) - if len(extra_items): - shape = (len(extra_items),) + tuple(len(x) for x in axes[1:]) + if len(extra_locs): + shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) # empty items -> dtype object block_values = np.empty(shape, dtype=object) block_values.fill(np.nan) - placement = None if is_unique else np.arange(len(extra_items)) - na_block = make_block( - block_values, extra_items, items, placement=placement) + na_block = make_block(block_values, placement=extra_locs) blocks.append(na_block) return blocks -def _simple_blockify(tuples, ref_items, dtype, is_unique=True): +def _simple_blockify(tuples, dtype): """ return a single array of a block that has a single dtype; if dtype is not None, coerce to this dtype """ - block_items, values, placement = _stack_arrays(tuples, ref_items, dtype) + values, placement = _stack_arrays(tuples, dtype) # CHECK DTYPE? 
if dtype is not None and values.dtype != dtype: # pragma: no cover values = values.astype(dtype) - if is_unique: - placement = None - block = make_block(values, block_items, ref_items, placement=placement) + block = make_block(values, placement=placement) return [block] -def _multi_blockify(tuples, ref_items, dtype=None, is_unique=True): +def _multi_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes """ # group by dtype @@ -4001,37 +3308,32 @@ def _multi_blockify(tuples, ref_items, dtype=None, is_unique=True): new_blocks = [] for dtype, tup_block in grouper: - block_items, values, placement = _stack_arrays( - list(tup_block), ref_items, dtype) - if is_unique: - placement = None - block = make_block(values, block_items, ref_items, placement=placement) + values, placement = _stack_arrays( + list(tup_block), dtype) + + block = make_block(values, placement=placement) new_blocks.append(block) return new_blocks -def _sparse_blockify(tuples, ref_items, dtype=None): +def _sparse_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes (and are sparse) """ new_blocks = [] for i, names, array in tuples: - - if not isinstance(names, (list, tuple)): - names = [names] - items = ref_items[ref_items.isin(names)] - array = _maybe_to_sparse(array) block = make_block( - array, items, ref_items, klass=SparseBlock, fastpath=True) + array, klass=SparseBlock, fastpath=True, + placement=[i]) new_blocks.append(block) return new_blocks -def _stack_arrays(tuples, ref_items, dtype): +def _stack_arrays(tuples, dtype): # fml def _asarray_compat(x): @@ -4055,33 +3357,7 @@ def _shape_compat(x): for i, arr in enumerate(arrays): stacked[i] = _asarray_compat(arr) - # index may box values - if ref_items.is_unique: - items = ref_items[ref_items.isin(names)] - else: - # a mi - if isinstance(ref_items, MultiIndex): - names = MultiIndex.from_tuples(names) - items = ref_items[ref_items.isin(names)] - - # plain old 
dups - else: - items = _ensure_index([n for n in names if n in ref_items]) - if len(items) != len(stacked): - raise ValueError("invalid names passed _stack_arrays") - - return items, stacked, placement - - -def _blocks_to_series_dict(blocks, index=None): - from pandas.core.series import Series - - series_dict = {} - - for block in blocks: - for item, vec in zip(block.items, block.values): - series_dict[item] = Series(vec, index=index, name=item) - return series_dict + return stacked, placement def _interleaved_dtype(blocks): @@ -4143,7 +3419,7 @@ def _lcd_dtype(l): return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock]) -def _consolidate(blocks, items): +def _consolidate(blocks): """ Merge blocks having same dtype, exclude non-consolidating blocks """ @@ -4154,7 +3430,7 @@ def _consolidate(blocks, items): new_blocks = [] for (_can_consolidate, dtype), group_blocks in grouper: - merged_blocks = _merge_blocks(list(group_blocks), items, dtype=dtype, + merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, _can_consolidate=_can_consolidate) if isinstance(merged_blocks, list): new_blocks.extend(merged_blocks) @@ -4164,14 +3440,7 @@ def _consolidate(blocks, items): return new_blocks -def _valid_blocks(newb): - if newb is None: - return [] - if not isinstance(newb, list): - newb = [ newb ] - return [ b for b in newb if len(b.items) > 0 ] - -def _merge_blocks(blocks, items, dtype=None, _can_consolidate=True): +def _merge_blocks(blocks, dtype=None, _can_consolidate=True): if len(blocks) == 1: return blocks[0] @@ -4182,22 +3451,17 @@ def _merge_blocks(blocks, items, dtype=None, _can_consolidate=True): raise AssertionError("_merge_blocks are invalid!") dtype = blocks[0].dtype - if not items.is_unique: - blocks = sorted(blocks, key=lambda b: b.ref_locs.tolist()) - + # FIXME: optimization potential in case all mgrs contain slices and + # combination of those slices is a slice, too. 
+ new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks]) new_values = _vstack([b.values for b in blocks], dtype) - new_items = blocks[0].items.append([b.items for b in blocks[1:]]) - new_block = make_block(new_values, new_items, items) - # unique, can reindex - if items.is_unique: - return new_block.reindex_items_from(items) + argsort = np.argsort(new_mgr_locs) + new_values = new_values[argsort] + new_mgr_locs = new_mgr_locs[argsort] - # merge the ref_locs - new_ref_locs = [b._ref_locs for b in blocks] - if all([x is not None for x in new_ref_locs]): - new_block.set_ref_locs(np.concatenate(new_ref_locs)) - return new_block + return make_block(new_values, + fastpath=True, placement=new_mgr_locs) # no merge return blocks @@ -4223,14 +3487,6 @@ def _vstack(to_stack, dtype): return np.vstack(to_stack) -def _possibly_convert_to_indexer(loc): - if com._is_bool_indexer(loc): - loc = [i for i, v in enumerate(loc) if v] - elif isinstance(loc, slice): - loc = lrange(loc.start, loc.stop) - return loc - - def _possibly_compare(a, b, op): res = op(a, b) is_a_array = isinstance(a, np.ndarray) @@ -4246,3 +3502,594 @@ def _possibly_compare(a, b, op): raise TypeError("Cannot compare types %r and %r" % tuple(type_names)) return res + + + + +def _concat_indexes(indexes): + return indexes[0].append(indexes[1:]) + + +def _invert_reordering(reordering, minlength=None): + """ + Invert reordering operation. + + Given array `reordering`, make `reordering_inv` of it, such that:: + + reordering_inv[reordering[x]] = x + + There are two types of indexers: + + source + is when element *s* at position *i* means that values to fill *i-th* + item of reindex operation should be taken from *s-th* item of the + original (this is what is returned by `pandas.Index.reindex`). + destination + is when element *d* at position *i* means that values from *i-th* item + of source should be used to fill *d-th* item of reindexing operation. 
+ + This function will convert from *source* to *destination* and vice-versa. + + .. note:: trailing ``-1`` may be lost upon conversion (this is what + `minlength` is there for). + + .. note:: if *source* indexer is not unique, corresponding *destination* + indexer will have ``dtype=object`` and will contain lists. + + Examples: + + >>> _invert_reordering([3, -1, 2, 4, -1]) + array([-1, -1, 2, 0, 3]) + >>> _invert_reordering([-1, -1, 0, 2, 3]) + array([3, -1, 2, 4]) + >>> _invert_reordering([1,3,5]) + array([-1, 0, -1, 1, -1, 2]) + + """ + reordering = np.asanyarray(reordering) + if not com.is_integer_dtype(reordering): + raise ValueError("Only integer indexers are supported") + + nonneg_indices = reordering[reordering >= 0] + counts = np.bincount(nonneg_indices, minlength=minlength) + has_non_unique = (counts > 1).any() + + dtype = np.dtype(np.object_) if has_non_unique else np.dtype(np.int_) + inverted = np.empty_like(counts, dtype=dtype) + inverted.fill(-1) + + nonneg_positions = np.arange(len(reordering), dtype=np.int_)[reordering >= 0] + np.put(inverted, nonneg_indices, nonneg_positions) + + if has_non_unique: + nonunique_elements = np.arange(len(counts))[counts > 1] + for elt in nonunique_elements: + inverted[elt] = nonneg_positions[nonneg_indices == elt].tolist() + + return inverted + + +def _get_blkno_placements(blknos, blk_count, group=True): + """ + + Parameters + ---------- + blknos : array of int64 + blk_count : int + group : bool + + Returns + ------- + iterator + yield (BlockPlacement, blkno) + + """ + + # FIXME: blk_count is unused, but it may avoid the use of dicts in cython + for blkno, indexer in lib.get_blkno_indexers(blknos, group): + yield blkno, BlockPlacement(indexer) + + +def items_overlap_with_suffix(left, lsuffix, right, rsuffix): + """ + If two indices overlap, add suffixes to overlapping entries. + + If corresponding suffix is empty, the entry is simply converted to string. 
+ + """ + to_rename = left.intersection(right) + if len(to_rename) == 0: + return left, right + else: + if not lsuffix and not rsuffix: + raise ValueError('columns overlap but no suffix specified: %s' % + to_rename) + + def lrenamer(x): + if x in to_rename: + return '%s%s' % (x, lsuffix) + return x + + def rrenamer(x): + if x in to_rename: + return '%s%s' % (x, rsuffix) + return x + + return (_transform_index(left, lrenamer), + _transform_index(right, rrenamer)) + + +def _transform_index(index, func): + """ + Apply function to all values found in index. + + This includes transforming multiindex entries separately. + + """ + if isinstance(index, MultiIndex): + items = [tuple(func(y) for y in x) for x in index] + return MultiIndex.from_tuples(items, names=index.names) + else: + items = [func(x) for x in index] + return Index(items, name=index.name) + + +def _putmask_smart(v, m, n): + """ + Return a new block, try to preserve dtype if possible. + + Parameters + ---------- + v : array_like + m : array_like + n : array_like + """ + + # n should be the length of the mask or a scalar here + if not is_list_like(n): + n = np.array([n] * len(m)) + + # see if we are only masking values that if putted + # will work in the current dtype + try: + nn = n[m] + nn_at = nn.astype(v.dtype) + if (nn == nn_at).all(): + nv = v.copy() + nv[m] = nn_at + return nv + except (ValueError, IndexError, TypeError): + pass + + # change the dtype + dtype, _ = com._maybe_promote(n.dtype) + nv = v.astype(dtype) + try: + nv[m] = n + except ValueError: + idx, = np.where(np.squeeze(m)) + for mask_index, new_val in zip(idx, n): + nv[mask_index] = new_val + return nv + + +def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): + """ + Concatenate block managers into one. 
+ + Parameters + ---------- + mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples + axes : list of Index + concat_axis : int + copy : bool + + """ + concat_plan = combine_concat_plans([get_mgr_concatenation_plan(mgr, indexers) + for mgr, indexers in mgrs_indexers], + concat_axis) + + blocks = [make_block(concatenate_join_units(join_units, concat_axis, + copy=copy), + placement=placement) + for placement, join_units in concat_plan] + + return BlockManager(blocks, axes) + + +def get_empty_dtype_and_na(join_units): + """ + Return dtype and N/A values to use when concatenating specified units. + + Returned N/A value may be None which means there was no casting involved. + + Returns + ------- + dtype + na + """ + + if len(join_units) == 1: + blk = join_units[0].block + if blk is None: + return np.float64, np.nan + else: + return blk.dtype, None + + has_none_blocks = False + dtypes = [None] * len(join_units) + + for i, unit in enumerate(join_units): + if unit.block is None: + has_none_blocks = True + else: + dtypes[i] = unit.dtype + + if not has_none_blocks and len(set(dtypes)) == 1: + # Unanimous decision, nothing to upcast. + return dtypes[0], None + + # dtypes = set() + upcast_classes = set() + null_upcast_classes = set() + for dtype, unit in zip(dtypes, join_units): + if dtype is None: + continue + + if issubclass(dtype.type, (np.object_, np.bool_)): + upcast_cls = 'object' + elif is_datetime64_dtype(dtype): + upcast_cls = 'datetime' + elif is_timedelta64_dtype(dtype): + upcast_cls = 'timedelta' + else: + upcast_cls = 'float' + + # Null blocks should not influence upcast class selection, unless there + # are only null blocks, when same upcasting rules must be applied to + # null upcast classes. 
+ if unit.is_null: + null_upcast_classes.add(upcast_cls) + else: + upcast_classes.add(upcast_cls) + + if not upcast_classes: + upcast_classes = null_upcast_classes + + # create the result + if 'object' in upcast_classes: + return np.dtype(np.object_), np.nan + elif 'float' in upcast_classes: + return np.dtype(np.float64), np.nan + elif 'datetime' in upcast_classes: + return np.dtype('M8[ns]'), tslib.iNaT + elif 'timedelta' in upcast_classes: + return np.dtype('m8[ns]'), tslib.iNaT + else: # pragma + raise AssertionError("invalid dtype determination in get_concat_dtype") + + +def concatenate_join_units(join_units, concat_axis, copy): + """ + Concatenate values from several join units along selected axis. + """ + if concat_axis == 0 and len(join_units) > 1: + # Concatenating join units along ax0 is handled in _merge_blocks. + raise AssertionError("Concatenating join units along axis0") + + empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units) + + to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype, + upcasted_na=upcasted_na) + for ju in join_units] + + if len(to_concat) == 1: + # Only one block, nothing to concatenate. + concat_values = to_concat[0] + if copy and concat_values.base is not None: + concat_values = concat_values.copy() + else: + concat_values = com._concat_compat(to_concat, axis=concat_axis) + + # FIXME: optimization potential: if len(join_units) == 1, single join unit + # is densified and sparsified back. + if any(unit.is_sparse for unit in join_units): + # If one of the units was sparse, concat_values are 2d and there's only + # one item. + return SparseArray(concat_values[0]) + else: + return concat_values + + +def get_mgr_concatenation_plan(mgr, indexers): + """ + Construct concatenation plan for given block manager and indexers. 
+ + Parameters + ---------- + mgr : BlockManager + indexers : dict of {axis: indexer} + + Returns + ------- + plan : list of (BlockPlacement, JoinUnit) tuples + + """ + # Calculate post-reindex shape , save for item axis which will be separate + # for each block anyway. + mgr_shape = list(mgr.shape) + for ax, indexer in indexers.items(): + mgr_shape[ax] = len(indexer) + mgr_shape = tuple(mgr_shape) + + if 0 in indexers: + ax0_indexer = indexers.pop(0) + blknos = com.take_1d(mgr._blknos, ax0_indexer, fill_value=-1) + blklocs = com.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1) + else: + + if mgr._is_single_block: + blk = mgr.blocks[0] + return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))] + + ax0_indexer = None + blknos = mgr._blknos + blklocs = mgr._blklocs + + plan = [] + for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks), + group=False): + assert placements.is_slice_like + + join_unit_indexers = indexers.copy() + + shape = list(mgr_shape) + shape[0] = len(placements) + shape = tuple(shape) + + if blkno == -1: + unit = JoinUnit(None, shape) + else: + blk = mgr.blocks[blkno] + ax0_blk_indexer = blklocs[placements.indexer] + + unit_no_ax0_reindexing = ( + len(placements) == len(blk.mgr_locs) and + # Fastpath detection of join unit not needing to reindex its + # block: no ax0 reindexing took place and block placement was + # sequential before. + ((ax0_indexer is None + and blk.mgr_locs.is_slice_like + and blk.mgr_locs.as_slice.step == 1) or + # Slow-ish detection: all indexer locs are sequential (and + # length match is checked above). + (np.diff(ax0_blk_indexer) == 1).all())) + + # Omit indexer if no item reindexing is required. 
+ if unit_no_ax0_reindexing: + join_unit_indexers.pop(0, None) + else: + join_unit_indexers[0] = ax0_blk_indexer + + unit = JoinUnit(blk, shape, join_unit_indexers) + + plan.append((placements, unit)) + + return plan + + +def combine_concat_plans(plans, concat_axis): + """ + Combine multiple concatenation plans into one. + + existing_plan is updated in-place. + """ + if len(plans) == 1: + for p in plans[0]: + yield p[0], [p[1]] + + elif concat_axis == 0: + offset = 0 + for plan in plans: + last_plc = None + + for plc, unit in plan: + yield plc.add(offset), [unit] + last_plc = plc + + if last_plc is not None: + offset += last_plc.as_slice.stop + + else: + num_ended = [0] + def _next_or_none(seq): + retval = next(seq, None) + if retval is None: + num_ended[0] += 1 + return retval + + plans = list(map(iter, plans)) + next_items = list(map(_next_or_none, plans)) + + while num_ended[0] != len(next_items): + if num_ended[0] > 0: + raise ValueError("Plan shapes are not aligned") + + placements, units = zip(*next_items) + + lengths = list(map(len, placements)) + min_len, max_len = min(lengths), max(lengths) + + if min_len == max_len: + yield placements[0], units + next_items[:] = map(_next_or_none, plans) + else: + yielded_placement = None + yielded_units = [None] * len(next_items) + for i, (plc, unit) in enumerate(next_items): + yielded_units[i] = unit + if len(plc) > min_len: + # trim_join_unit updates unit in place, so only + # placement needs to be sliced to skip min_len. + next_items[i] = (plc[min_len:], + trim_join_unit(unit, min_len)) + else: + yielded_placement = plc + next_items[i] = _next_or_none(plans[i]) + + yield yielded_placement, yielded_units + + +def trim_join_unit(join_unit, length): + """ + Reduce join_unit's shape along item axis to length. + + Extra items that didn't fit are returned as a separate block. 
+ """ + + if 0 not in join_unit.indexers: + extra_indexers = join_unit.indexers + + if join_unit.block is None: + extra_block = None + else: + extra_block = join_unit.block.getitem_block(slice(length, None)) + join_unit.block = join_unit.block.getitem_block(slice(length)) + else: + extra_block = join_unit.block + + extra_indexers = copy.copy(join_unit.indexers) + extra_indexers[0] = extra_indexers[0][length:] + join_unit.indexers[0] = join_unit.indexers[0][:length] + + extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:] + join_unit.shape = (length,) + join_unit.shape[1:] + + return JoinUnit(block=extra_block, indexers=extra_indexers, + shape=extra_shape) + + +class JoinUnit(object): + def __init__(self, block, shape, indexers={}): + # Passing shape explicitly is required for cases when block is None. + self.block = block + self.indexers = indexers + self.shape = shape + + def __repr__(self): + return '%s(%r, %s)' % (self.__class__.__name__, + self.block, self.indexers) + + @cache_readonly + def needs_filling(self): + for indexer in self.indexers.values(): + # FIXME: cache results of indexer == -1 checks. + if (indexer == -1).any(): + return True + + return False + + @cache_readonly + def dtype(self): + if self.block is None: + raise AssertionError("Block is None, no dtype") + + if not self.needs_filling: + return self.block.dtype + else: + return np.dtype(com._maybe_promote(self.block.dtype, + self.block.fill_value)[0]) + return self._dtype + + @cache_readonly + def is_null(self): + if self.block is None: + return True + + if not self.block._can_hold_na: + return False + + # Usually it's enough to check but a small fraction of values to see if + # a block is NOT null, chunks should help in such cases. 1000 value + # was chosen rather arbitrarily. 
+ values_flat = self.block.values.ravel() + total_len = values_flat.shape[0] + chunk_len = max(total_len // 40, 1000) + for i in range(0, total_len, chunk_len): + if not isnull(values_flat[i: i + chunk_len]).all(): + return False + + return True + + @cache_readonly + def is_sparse(self): + return self.block is not None and self.block.is_sparse + + def get_reindexed_values(self, empty_dtype, upcasted_na): + if upcasted_na is None: + # No upcasting is necessary + fill_value = self.block.fill_value + values = self.block.get_values() + else: + fill_value = upcasted_na + + if self.is_null: + missing_arr = np.empty(self.shape, dtype=empty_dtype) + if np.prod(self.shape): + # NumPy 1.6 workaround: this statement gets strange if all + # blocks are of same dtype and some of them are empty: + # empty one are considered "null" so they must be filled, + # but no dtype upcasting happens and the dtype may not + # allow NaNs. + # + # In general, no one should get hurt when one tries to put + # incorrect values into empty array, but numpy 1.6 is + # strict about that. + missing_arr.fill(fill_value) + return missing_arr + + if self.block.is_bool: + # External code requested filling/upcasting, bool values must + # be upcasted to object to avoid being upcasted to numeric. + values = self.block.astype(np.object_).values + else: + # No dtype upcasting is done here, it will be performed during + # concatenation itself. + values = self.block.get_values() + + if not self.indexers: + # If there's no indexing to be done, we want to signal outside + # code that this array must be copied explicitly. This is done + # by returning a view and checking `retval.base`. 
+ return values.view() + else: + for ax, indexer in self.indexers.items(): + values = com.take_nd(values, indexer, axis=ax, + fill_value=fill_value) + + return values + + +def _fast_count_smallints(arr): + """Faster version of set(arr) for sequences of small numbers.""" + if len(arr) == 0: + # Handle empty arr case separately: numpy 1.6 chokes on that. + return np.empty((0, 2), dtype=arr.dtype) + else: + counts = np.bincount(arr) + nz = counts.nonzero()[0] + return np.c_[nz, counts[nz]] + + +def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): + if isinstance(slice_or_indexer, slice): + return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer, + length) + elif (isinstance(slice_or_indexer, np.ndarray) and + slice_or_indexer.dtype == np.bool_): + return 'mask', slice_or_indexer, slice_or_indexer.sum() + else: + indexer = np.asanyarray(slice_or_indexer, dtype=np.int_) + if not allow_fill: + indexer = _maybe_convert_indices(indexer, length) + return 'fancy', indexer, len(indexer) diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 7dc266617c5fd..196b80a83723f 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -447,15 +447,17 @@ def _unstack_frame(obj, level): new_blocks = [] mask_blocks = [] for blk in obj._data.blocks: + blk_items = obj._data.items[blk.mgr_locs.indexer] bunstacker = _Unstacker(blk.values.T, obj.index, level=level, - value_columns=blk.items) + value_columns=blk_items) new_items = bunstacker.get_new_columns() + new_placement = new_columns.get_indexer(new_items) new_values, mask = bunstacker.get_new_values() - mblk = make_block(mask.T, new_items, new_columns) + mblk = make_block(mask.T, placement=new_placement) mask_blocks.append(mblk) - newb = make_block(new_values.T, new_items, new_columns) + newb = make_block(new_values.T, placement=new_placement) new_blocks.append(newb) result = DataFrame(BlockManager(new_blocks, new_axes)) @@ -1071,10 +1073,11 @@ def make_axis_dummies(frame, axis='minor', 
transform=None): return DataFrame(values, columns=items, index=frame.index) -def block2d_to_blocknd(values, items, shape, labels, ref_items=None): +def block2d_to_blocknd(values, placement, shape, labels, ref_items): """ pivot to the labels shape """ from pandas.core.internals import make_block - panel_shape = (len(items),) + shape + + panel_shape = (len(placement),) + shape # TODO: lexsort depth needs to be 2!! @@ -1092,13 +1095,10 @@ def block2d_to_blocknd(values, items, shape, labels, ref_items=None): pvalues.fill(fill_value) values = values - for i in range(len(items)): + for i in range(len(placement)): pvalues[i].flat[mask] = values[:, i] - if ref_items is None: - ref_items = items - - return make_block(pvalues, items, ref_items) + return make_block(pvalues, placement=placement) def factor_indexer(shape, labels): diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 105bea92124fd..7da86565b51cd 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -356,7 +356,7 @@ def encode(obj): return {'typ': 'block_manager', 'klass': obj.__class__.__name__, 'axes': data.axes, - 'blocks': [{'items': b.items, + 'blocks': [{'items': data.items.take(b.mgr_locs), 'values': convert(b.values), 'shape': b.values.shape, 'dtype': b.dtype.num, @@ -481,10 +481,11 @@ def decode(obj): axes = obj['axes'] def create_block(b): - dtype = dtype_for(b['dtype']) - return make_block(unconvert(b['values'], dtype, b['compress']) - .reshape(b['shape']), b['items'], axes[0], - klass=getattr(internals, b['klass'])) + values = unconvert(b['values'], dtype_for(b['dtype']), + b['compress']).reshape(b['shape']) + return make_block(values=values, + klass=getattr(internals, b['klass']), + placement=axes[0].get_indexer(b['items'])) blocks = [create_block(b) for b in obj['blocks']] return globals()[obj['klass']](BlockManager(blocks, axes)) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 27298e52e3186..95daa2bbc2752 100644 --- a/pandas/io/pytables.py +++ 
b/pandas/io/pytables.py @@ -1704,11 +1704,11 @@ def set_kind(self): if self.typ is None: self.typ = getattr(self.description, self.cname, None) - def set_atom(self, block, existing_col, min_itemsize, + def set_atom(self, block, block_items, existing_col, min_itemsize, nan_rep, info, encoding=None, **kwargs): """ create and setup my atom from the block b """ - self.values = list(block.items) + self.values = list(block_items) dtype = block.dtype.name rvalues = block.values.ravel() inferred_type = lib.infer_dtype(rvalues) @@ -1763,7 +1763,7 @@ def set_atom(self, block, existing_col, min_itemsize, # end up here ### elif inferred_type == 'string' or dtype == 'object': self.set_atom_string( - block, + block, block_items, existing_col, min_itemsize, nan_rep, @@ -1776,8 +1776,8 @@ def set_atom(self, block, existing_col, min_itemsize, def get_atom_string(self, block, itemsize): return _tables().StringCol(itemsize=itemsize, shape=block.shape[0]) - def set_atom_string( - self, block, existing_col, min_itemsize, nan_rep, encoding): + def set_atom_string(self, block, block_items, existing_col, min_itemsize, + nan_rep, encoding): # fill nan items with myself, don't disturb the blocks by # trying to downcast block = block.fillna(nan_rep, downcast=False)[0] @@ -1789,9 +1789,9 @@ def set_atom_string( # we cannot serialize this data, so report an exception on a column # by column basis - for item in block.items: + for i, item in enumerate(block_items): - col = block.get(item) + col = block.iget(i) inferred_type = lib.infer_dtype(col.ravel()) if inferred_type != 'string': raise TypeError( @@ -2649,7 +2649,8 @@ def read(self, **kwargs): for i in range(self.nblocks): blk_items = self.read_index('block%d_items' % i) values = self.read_array('block%d_values' % i) - blk = make_block(values, blk_items, items) + blk = make_block(values, + placement=items.get_indexer(blk_items)) blocks.append(blk) return self.obj_type(BlockManager(blocks, axes)) @@ -2665,12 +2666,12 @@ def write(self, obj, 
**kwargs): self.write_index('axis%d' % i, ax) # Supporting mixed-type DataFrame objects...nontrivial - self.attrs.nblocks = nblocks = len(data.blocks) - for i in range(nblocks): - blk = data.blocks[i] + self.attrs.nblocks = len(data.blocks) + for i, blk in enumerate(data.blocks): # I have no idea why, but writing values before items fixed #2299 - self.write_array('block%d_values' % i, blk.values, items=blk.items) - self.write_index('block%d_items' % i, blk.items) + blk_items = data.items.take(blk.mgr_locs) + self.write_array('block%d_values' % i, blk.values, items=blk_items) + self.write_index('block%d_items' % i, blk_items) class FrameFixed(BlockManagerFixed): @@ -3190,51 +3191,63 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, for a in self.non_index_axes: obj = _reindex_axis(obj, a[0], a[1]) + def get_blk_items(mgr, blocks): + return [mgr.items.take(blk.mgr_locs) for blk in blocks] + # figure out data_columns and get out blocks block_obj = self.get_object(obj).consolidate() blocks = block_obj._data.blocks + blk_items = get_blk_items(block_obj._data, blocks) if len(self.non_index_axes): axis, axis_labels = self.non_index_axes[0] data_columns = self.validate_data_columns( data_columns, min_itemsize) if len(data_columns): - blocks = block_obj.reindex_axis( + mgr = block_obj.reindex_axis( Index(axis_labels) - Index(data_columns), axis=axis - )._data.blocks + )._data + + blocks = list(mgr.blocks) + blk_items = get_blk_items(mgr, blocks) for c in data_columns: - blocks.extend( - block_obj.reindex_axis([c], axis=axis)._data.blocks) + mgr = block_obj.reindex_axis([c], axis=axis)._data + blocks.extend(mgr.blocks) + blk_items.extend(get_blk_items(mgr, mgr.blocks)) # reorder the blocks in the same order as the existing_table if we can if existing_table is not None: - by_items = dict([(tuple(b.items.tolist()), b) for b in blocks]) + by_items = dict([(tuple(b_items.tolist()), (b, b_items)) + for b, b_items in zip(blocks, blk_items)]) new_blocks = [] + 
new_blk_items = [] for ea in existing_table.values_axes: items = tuple(ea.values) try: - b = by_items.pop(items) + b, b_items = by_items.pop(items) new_blocks.append(b) + new_blk_items.append(b_items) except: raise ValueError( "cannot match existing table structure for [%s] on " "appending data" % ','.join(com.pprint_thing(item) for item in items)) blocks = new_blocks + blk_items = new_blk_items # add my values self.values_axes = [] - for i, b in enumerate(blocks): + for i, (b, b_items) in enumerate(zip(blocks, blk_items)): # shape of the data column are the indexable axes klass = DataCol name = None # we have a data_column - if (data_columns and len(b.items) == 1 and - b.items[0] in data_columns): + if (data_columns and len(b_items) == 1 and + b_items[0] in data_columns): klass = DataIndexableCol - name = b.items[0] + name = b_items[0] self.data_columns.append(name) # make sure that we match up the existing columns @@ -3252,7 +3265,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, try: col = klass.create_for_block( i=i, name=name, version=self.version) - col.set_atom(block=b, + col.set_atom(block=b, block_items=b_items, existing_col=existing_col, min_itemsize=min_itemsize, nan_rep=nan_rep, @@ -3268,7 +3281,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, raise Exception( "cannot find the correct atom type -> " "[dtype->%s,items->%s] %s" - % (b.dtype.name, b.items, str(detail)) + % (b.dtype.name, b_items, str(detail)) ) j += 1 @@ -3490,7 +3503,8 @@ def read(self, where=None, columns=None, **kwargs): take_labels = [l.take(sorter) for l in labels] items = Index(c.values) block = block2d_to_blocknd( - sorted_values, items, tuple(N), take_labels) + values=sorted_values, placement=np.arange(len(items)), + shape=tuple(N), labels=take_labels, ref_items=items) # create the object mgr = BlockManager([block], [items] + levels) @@ -3823,7 +3837,7 @@ def read(self, where=None, columns=None, **kwargs): if values.ndim == 1: values = 
values.reshape(1, values.shape[0]) - block = make_block(values, cols_, cols_) + block = make_block(values, placement=np.arange(len(cols_))) mgr = BlockManager([block], [cols_, index_]) frames.append(DataFrame(mgr)) diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index b70248d1ef3f4..3054b75ce56ac 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -83,7 +83,6 @@ def test_read_pickles_0_13_0(self): self.read_pickles('0.13.0') def test_round_trip_current(self): - for typ, dv in self.data.items(): for dt, expected in dv.items(): diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 9c9d20e51be64..90c2681b837e8 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -3504,7 +3504,6 @@ def test_invalid_filtering(self): self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A','B'] & columns=['C']") def test_string_select(self): - # GH 2973 with ensure_clean_store(self.path) as store: diff --git a/pandas/lib.pyx b/pandas/lib.pyx index a1fef095ea277..0bac4f8011420 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -19,6 +19,17 @@ from cpython cimport (PyDict_New, PyDict_GetItem, PyDict_SetItem, PyTuple_New, PyObject_SetAttrString) +cdef extern from "Python.h": + ctypedef struct PySliceObject: + pass + + cdef int PySlice_GetIndicesEx( + PySliceObject* s, Py_ssize_t length, + Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step, + Py_ssize_t *slicelength) except -1 + + + cimport cpython isnan = np.isnan @@ -1232,6 +1243,419 @@ def indices_fast(object index, ndarray[int64_t] labels, list keys, return result + +@cython.boundscheck(False) +@cython.wraparound(False) +def get_blkno_indexers(int64_t[:] blknos, bint group=True): + """ + Enumerate contiguous runs of integers in ndarray. + + Iterate over elements of `blknos` yielding ``(blkno, slice(start, stop))`` + pairs for each contiguous run found. 
+ + If `group` is True and there is more than one run for a certain blkno, + ``(blkno, array)`` with an array containing positions of all elements equal + to blkno. + + Returns + ------- + iter : iterator of (int, slice or array) + + """ + # There's blkno in this function's name because it's used in block & + # blockno handling. + cdef: + int64_t cur_blkno + Py_ssize_t i, start, stop, n, diff + + list group_order + dict group_slices + int64_t[:] res_view + + n = blknos.shape[0] + + if n > 0: + start = 0 + cur_blkno = blknos[start] + + if group == False: + for i in range(1, n): + if blknos[i] != cur_blkno: + yield cur_blkno, slice(start, i) + + start = i + cur_blkno = blknos[i] + + yield cur_blkno, slice(start, n) + else: + group_order = [] + group_dict = {} + + for i in range(1, n): + if blknos[i] != cur_blkno: + if cur_blkno not in group_dict: + group_order.append(cur_blkno) + group_dict[cur_blkno] = [(start, i)] + else: + group_dict[cur_blkno].append((start, i)) + + start = i + cur_blkno = blknos[i] + + if cur_blkno not in group_dict: + group_order.append(cur_blkno) + group_dict[cur_blkno] = [(start, n)] + else: + group_dict[cur_blkno].append((start, n)) + + for blkno in group_order: + slices = group_dict[blkno] + if len(slices) == 1: + yield blkno, slice(slices[0][0], slices[0][1]) + else: + tot_len = sum(stop - start for start, stop in slices) + result = np.empty(tot_len, dtype=np.int64) + res_view = result + + i = 0 + for start, stop in slices: + for diff in range(start, stop): + res_view[i] = diff + i += 1 + + yield blkno, result + + +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef slice indexer_as_slice(int64_t[:] vals): + cdef: + Py_ssize_t i, n, start, stop + int64_t d + + if vals is None: + raise TypeError("vals must be ndarray") + + n = vals.shape[0] + + if n == 0 or vals[0] < 0: + return None + + if n == 1: + return slice(vals[0], vals[0] + 1, 1) + + if vals[1] < 0: + return None + + # n > 2 + d = vals[1] - vals[0] + + if d == 0: + return 
None + + for i in range(2, n): + if vals[i] < 0 or vals[i] - vals[i-1] != d: + return None + + start = vals[0] + stop = start + n * d + if stop < 0 and d < 0: + return slice(start, None, d) + else: + return slice(start, stop, d) + + +cpdef slice_canonize(slice s): + """ + Convert slice to canonical bounded form. + """ + cdef: + Py_ssize_t start, stop, step, length + + if s.step is None: + step = 1 + else: + step = <Py_ssize_t>s.step + if step == 0: + raise ValueError("slice step cannot be zero") + + if step > 0: + if s.stop is None: + raise ValueError("unbounded slice") + + stop = <Py_ssize_t>s.stop + if s.start is None: + start = 0 + else: + start = <Py_ssize_t>s.start + if start > stop: + start = stop + elif step < 0: + if s.start is None: + raise ValueError("unbounded slice") + + start = <Py_ssize_t>s.start + if s.stop is None: + stop = -1 + else: + stop = <Py_ssize_t>s.stop + if stop > start: + stop = start + + if start < 0 or (stop < 0 and s.stop is not None): + raise ValueError("unbounded slice") + + if stop < 0: + return slice(start, None, step) + else: + return slice(start, stop, step) + + +cpdef slice_get_indices_ex(slice slc, Py_ssize_t objlen=INT64_MAX): + """ + Get (start, stop, step, length) tuple for a slice. + + If `objlen` is not specified, slice must be bounded, otherwise the result + will be wrong. + + """ + cdef: + Py_ssize_t start, stop, step, length + + if slc is None: + raise TypeError("slc should be a slice") + + PySlice_GetIndicesEx(<PySliceObject*>slc, objlen, + &start, &stop, &step, &length) + return start, stop, step, length + + +cpdef Py_ssize_t slice_len(slice slc, Py_ssize_t objlen=INT64_MAX) except -1: + """ + Get length of a bounded slice. + + The slice must not have any "open" bounds that would create dependency on + container size, i.e.: + - if ``s.step is None or s.step > 0``, ``s.stop`` is not ``None`` + - if ``s.step < 0``, ``s.start`` is not ``None`` + + Otherwise, the result is unreliable. 
+ + """ + cdef: + Py_ssize_t start, stop, step, length + + if slc is None: + raise TypeError("slc must be slice") + + PySlice_GetIndicesEx(<PySliceObject*>slc, objlen, + &start, &stop, &step, &length) + + return length + + +def slice_getitem(slice slc not None, ind): + cdef: + Py_ssize_t s_start, s_stop, s_step, s_len + Py_ssize_t ind_start, ind_stop, ind_step, ind_len + + s_start, s_stop, s_step, s_len = slice_get_indices_ex(slc) + + if isinstance(ind, slice): + ind_start, ind_stop, ind_step, ind_len = slice_get_indices_ex(ind, + s_len) + + if ind_step > 0 and ind_len == s_len: + # short-cut for no-op slice + if ind_len == s_len: + return slc + + if ind_step < 0: + s_start = s_stop - s_step + ind_step = -ind_step + + s_step *= ind_step + s_stop = s_start + ind_stop * s_step + s_start = s_start + ind_start * s_step + + if s_step < 0 and s_stop < 0: + return slice(s_start, None, s_step) + else: + return slice(s_start, s_stop, s_step) + + else: + return np.arange(s_start, s_stop, s_step)[ind] + + +cdef class BlockPlacement: + # __slots__ = '_as_slice', '_as_array', '_len' + cdef slice _as_slice + cdef object _as_array + + cdef bint _has_slice, _has_array, _is_known_slice_like + + def __init__(self, val): + cdef slice slc + + self._has_slice = False + self._has_array = False + + if isinstance(val, slice): + slc = slice_canonize(val) + + if slc.start != slc.stop: + self._as_slice = slc + self._has_slice = True + else: + arr = np.empty(0, dtype=np.int64) + self._as_array = arr + self._has_array = True + else: + # Cython memoryview interface requires ndarray to be writeable. 
+ arr = np.require(val, dtype=np.int64, requirements='W') + assert arr.ndim == 1 + self._as_array = arr + self._has_array = True + + def __unicode__(self): + cdef slice s = self._ensure_has_slice() + if s is not None: + v = self._as_slice + else: + v = self._as_array + + return '%s(%r)' % (self.__class__.__name__, v) + + def __len__(self): + cdef slice s = self._ensure_has_slice() + if s is not None: + return slice_len(s) + else: + return len(self._as_array) + + def __iter__(self): + cdef slice s = self._ensure_has_slice() + cdef Py_ssize_t start, stop, step, _ + if s is not None: + start, stop, step, _ = slice_get_indices_ex(s) + return iter(range(start, stop, step)) + else: + return iter(self._as_array) + + @property + def as_slice(self): + cdef slice s = self._ensure_has_slice() + if s is None: + raise TypeError('Not slice-like') + else: + return s + + @property + def indexer(self): + cdef slice s = self._ensure_has_slice() + if s is not None: + return s + else: + return self._as_array + + def isin(self, arr): + from pandas.core.index import Int64Index + return Int64Index(self.as_array, copy=False).isin(arr) + + @property + def as_array(self): + cdef Py_ssize_t start, stop, end, _ + if not self._has_array: + start, stop, step, _ = slice_get_indices_ex(self._as_slice) + self._as_array = np.arange(start, stop, step, + dtype=np.int_) + self._has_array = True + return self._as_array + + @property + def is_slice_like(self): + cdef slice s = self._ensure_has_slice() + return s is not None + + def __getitem__(self, loc): + cdef slice s = self._ensure_has_slice() + if s is not None: + val = slice_getitem(s, loc) + else: + val = self._as_array[loc] + + if not isinstance(val, slice) and val.ndim == 0: + return val + + return BlockPlacement(val) + + def delete(self, loc): + return BlockPlacement(np.delete(self.as_array, loc, axis=0)) + + def append(self, others): + if len(others) == 0: + return self + + return BlockPlacement(np.concatenate([self.as_array] + + [o.as_array 
for o in others])) + + cdef iadd(self, other): + cdef slice s = self._ensure_has_slice() + cdef Py_ssize_t other_int, start, stop, step, l + + if isinstance(other, int) and s is not None: + other_int = <Py_ssize_t>other + + if other_int == 0: + return self + + start, stop, step, l = slice_get_indices_ex(s) + start += other_int + stop += other_int + + if ((step > 0 and start < 0) or + (step < 0 and stop < step)): + raise ValueError("iadd causes length change") + + if stop < 0: + self._as_slice = slice(start, None, step) + else: + self._as_slice = slice(start, stop, step) + + self._has_array = False + self._as_array = None + else: + newarr = self.as_array + other + if (newarr < 0).any(): + raise ValueError("iadd causes length change") + + self._as_array = newarr + self._has_array = True + self._has_slice = False + self._as_slice = None + + return self + + cdef BlockPlacement copy(self): + cdef slice s = self._ensure_has_slice() + if s is not None: + return BlockPlacement(s) + else: + return BlockPlacement(self._as_array) + + def add(self, other): + return self.copy().iadd(other) + + def sub(self, other): + return self.add(-other) + + cdef slice _ensure_has_slice(self): + if not self._has_slice: + self._as_slice = indexer_as_slice(self._as_array) + self._has_slice = True + return self._as_slice + + include "reduce.pyx" include "properties.pyx" include "inference.pyx" diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index 1c599653f9fc5..48576266c3b5f 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -541,7 +541,7 @@ def sparse_reindex(self, new_index): raise TypeError('new index must be a SparseIndex') block = self.block.sparse_reindex(new_index) - new_data = SingleBlockManager(block, block.ref_items) + new_data = SingleBlockManager(block, self.index) return self._constructor(new_data, index=self.index, sparse_index=new_index, fill_value=self.fill_value).__finalize__(self) diff --git a/pandas/sparse/tests/test_sparse.py 
b/pandas/sparse/tests/test_sparse.py index 7696353dca6f1..3a2f8adf719e4 100644 --- a/pandas/sparse/tests/test_sparse.py +++ b/pandas/sparse/tests/test_sparse.py @@ -1023,7 +1023,7 @@ def _compare_to_dense(a, b, da, db, op): for op in ops: _compare_to_dense(frame, frame[::2], frame.to_dense(), frame[::2].to_dense(), op) - for s in series: + for i, s in enumerate(series): _compare_to_dense(frame, s, frame.to_dense(), s.to_dense(), op) _compare_to_dense(s, frame, s.to_dense(), diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 3a3d5a822163f..2aac364d16770 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -106,7 +106,6 @@ class CheckIndexing(object): def test_getitem(self): # slicing - sl = self.frame[:20] self.assertEqual(20, len(sl.index)) @@ -120,7 +119,7 @@ def test_getitem(self): self.assertIsNotNone(self.frame[key]) self.assertNotIn('random', self.frame) - with assertRaisesRegexp(KeyError, 'no item named random'): + with assertRaisesRegexp(KeyError, 'random'): self.frame['random'] df = self.frame.copy() @@ -2723,6 +2722,11 @@ def test_constructor_corner(self): df = DataFrame({}, columns=['foo', 'bar']) self.assertEqual(df.values.dtype, np.object_) + df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'), + dtype=int) + self.assertEqual(df.values.dtype, np.object_) + + def test_constructor_scalar_inference(self): data = {'int': 1, 'bool': True, 'float': 3., 'complex': 4j, 'object': 'foo'} @@ -3341,7 +3345,6 @@ def test_column_dups2(self): assert_frame_equal(result, expected) def test_column_dups_indexing(self): - def check(result, expected=None): if expected is not None: assert_frame_equal(result,expected) @@ -7804,11 +7807,11 @@ def test_regex_replace_dict_mixed(self): # scalar -> dict # to_replace regex, {value: value} + expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c': + mix['c']}) res = dfmix.replace('a', {'b': nan}, regex=True) res2 = dfmix.copy() res2.replace('a', {'b': nan}, 
regex=True, inplace=True) - expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c': - mix['c']}) assert_frame_equal(res, expec) assert_frame_equal(res2, expec) @@ -8645,7 +8648,6 @@ def test_reindex_dups(self): self.assertRaises(ValueError, df.reindex, index=list(range(len(df)))) def test_align(self): - af, bf = self.frame.align(self.frame) self.assertIsNot(af._data, self.frame._data) @@ -9789,7 +9791,7 @@ def test_reorder_levels(self): assert_frame_equal(result, expected) def test_sort_index(self): - frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4], + frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4], columns=['A', 'B', 'C', 'D']) # axis=0 @@ -11820,8 +11822,8 @@ def test_columns_with_dups(self): df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns) df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1) - result = df._data._set_ref_locs() - self.assertEqual(len(result), len(df.columns)) + self.assertEqual(len(df._data._blknos), len(df.columns)) + self.assertEqual(len(df._data._blklocs), len(df.columns)) # testing iget for i in range(len(df.columns)): diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 261e1dd2a590c..a105b17795398 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1015,7 +1015,7 @@ def test_iloc_getitem_doc_issue(self): columns = list(range(0,8,2)) df = DataFrame(arr,index=index,columns=columns) - df._data.blocks[0].ref_locs + df._data.blocks[0].mgr_locs result = df.iloc[1:5,2:4] str(result) result.dtypes diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index 2c9c8a94a1902..b91384a840c33 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -4,6 +4,7 @@ import numpy as np from pandas import Index, MultiIndex, DataFrame, Series +from pandas.compat import OrderedDict from pandas.sparse.array import SparseArray from pandas.core.internals import * 
import pandas.core.internals as internals @@ -17,89 +18,159 @@ def assert_block_equal(left, right): assert_almost_equal(left.values, right.values) assert(left.dtype == right.dtype) - assert(left.items.equals(right.items)) - assert(left.ref_items.equals(right.ref_items)) + assert_almost_equal(left.mgr_locs, right.mgr_locs) -def get_float_mat(n, k, dtype): - return np.repeat(np.atleast_2d(np.arange(k, dtype=dtype)), n, axis=0) - -TEST_COLS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 's1', 's2'] -N = 10 - - -def get_float_ex(cols=['a', 'c', 'e'], dtype = np.float_): - floats = get_float_mat(N, len(cols), dtype = dtype).T - return make_block(floats, cols, TEST_COLS) - - -def get_complex_ex(cols=['h']): - complexes = (get_float_mat(N, 1, dtype = np.float_).T * 1j).astype(np.complex128) - return make_block(complexes, cols, TEST_COLS) - - -def get_obj_ex(cols=['b', 'd']): - mat = np.empty((N, 2), dtype=object) - mat[:, 0] = 'foo' - mat[:, 1] = 'bar' - return make_block(mat.T, cols, TEST_COLS) - -def get_bool_ex(cols=['f']): - mat = np.ones((N, 1), dtype=bool) - return make_block(mat.T, cols, TEST_COLS) +def get_numeric_mat(shape): + arr = np.arange(shape[0]) + return np.lib.stride_tricks.as_strided( + x=arr, shape=shape, + strides=(arr.itemsize,) + (0,) * (len(shape) - 1)).copy() -def get_int_ex(cols=['g'], dtype = np.int_): - mat = randn(N, 1).astype(dtype) - return make_block(mat.T, cols, TEST_COLS) - +N = 10 -def get_dt_ex(cols=['h']): - mat = randn(N, 1).astype(int).astype('M8[ns]') - return make_block(mat.T, cols, TEST_COLS) -def get_sparse_ex1(): - sa1 = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) - return make_block(sa1, ['s1'], TEST_COLS) +def create_block(typestr, placement, item_shape=None, num_offset=0): + """ + Supported typestr: + + * float, f8, f4, f2 + * int, i8, i4, i2, i1 + * uint, u8, u4, u2, u1 + * complex, c16, c8 + * bool + * object, string, O + * datetime, dt + * sparse (SparseArray with fill_value=0.0) + * sparse_na (SparseArray with 
fill_value=np.nan) + + """ + placement = BlockPlacement(placement) + num_items = len(placement) + + if item_shape is None: + item_shape = (N,) + + shape = (num_items,) + item_shape + + mat = get_numeric_mat(shape) + + if typestr in ('float', 'f8', 'f4', 'f2', + 'int', 'i8', 'i4', 'i2', 'i1', + 'uint', 'u8', 'u4', 'u2', 'u1'): + values = mat.astype(typestr) + num_offset + elif typestr in ('complex', 'c16', 'c8'): + values = 1.j * (mat.astype(typestr) + num_offset) + elif typestr in ('object', 'string', 'O'): + values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset], + shape) + elif typestr in ('bool'): + values = np.ones(shape, dtype=np.bool_) + elif typestr in ('datetime', 'dt'): + values = (mat * 1e9).astype('M8[ns]') + elif typestr in ('sparse', 'sparse_na'): + # FIXME: doesn't support num_rows != 10 + assert shape[-1] == 10 + assert all(s == 1 for s in shape[:-1]) + if typestr.endswith('_na'): + fill_value = np.nan + else: + fill_value = 0.0 + values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value, + 4, 5, fill_value, 6], fill_value=fill_value) + arr = values.sp_values.view() + arr += (num_offset - 1) + else: + raise ValueError('Unsupported typestr: "%s"' % typestr) + + return make_block(values, placement=placement, ndim=len(shape)) + + +def create_single_mgr(typestr, num_rows=None): + if num_rows is None: + num_rows = N + + return SingleBlockManager( + create_block(typestr, placement=slice(0, num_rows), item_shape=()), + np.arange(num_rows)) + + +def create_mgr(descr, item_shape=None): + """ + Construct BlockManager from string description. + + String description syntax looks similar to np.matrix initializer. 
It looks + like this:: + + a,b,c: f8; d,e,f: i8 + + Rules are rather simple: + + * see list of supported datatypes in `create_block` method + * components are semicolon-separated + * each component is `NAME,NAME,NAME: DTYPE_ID` + * whitespace around colons & semicolons are removed + * components with same DTYPE_ID are combined into single block + * to force multiple blocks with same dtype, use '-SUFFIX':: + + 'a:f8-1; b:f8-2; c:f8-foobar' + + """ + if item_shape is None: + item_shape = (N,) + + offset = 0 + mgr_items = [] + block_placements = OrderedDict() + for d in descr.split(';'): + d = d.strip() + names, blockstr = d.partition(':')[::2] + blockstr = blockstr.strip() + names = names.strip().split(',') + + mgr_items.extend(names) + placement = list(np.arange(len(names)) + offset) + try: + block_placements[blockstr].extend(placement) + except KeyError: + block_placements[blockstr] = placement + offset += len(names) -def get_sparse_ex2(): - sa2 = SparseArray([0, 0, 2, 3, 4, 0, 6, 7, 0, 8], fill_value=0) - return make_block(sa2, ['s2'], TEST_COLS) + mgr_items = Index(mgr_items) -def create_blockmanager(blocks): - l = [] - for b in blocks: - l.extend(b.items) - items = Index(l) - for b in blocks: - b.ref_items = items + blocks = [] + num_offset = 0 + for blockstr, placement in block_placements.items(): + typestr = blockstr.split('-')[0] + blocks.append(create_block(typestr, placement, item_shape=item_shape, + num_offset=num_offset,)) + num_offset += len(placement) - index_sz = blocks[0].shape[1] - return BlockManager(blocks, [items, np.arange(index_sz)]) + return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]), + [mgr_items] + [np.arange(n) for n in item_shape]) -def create_singleblockmanager(blocks): - l = [] - for b in blocks: - l.extend(b.items) - items = Index(l) - for b in blocks: - b.ref_items = items - return SingleBlockManager(blocks, [items]) class TestBlock(tm.TestCase): _multiprocess_can_split_ = True def setUp(self): - self.fblock = 
get_float_ex() - self.cblock = get_complex_ex() - self.oblock = get_obj_ex() - self.bool_block = get_bool_ex() - self.int_block = get_int_ex() + # self.fblock = get_float_ex() # a,c,e + # self.cblock = get_complex_ex() # + # self.oblock = get_obj_ex() + # self.bool_block = get_bool_ex() + # self.int_block = get_int_ex() + + self.fblock = create_block('float', [0, 2, 4]) + self.cblock = create_block('complex', [7]) + self.oblock = create_block('object', [1, 3]) + self.bool_block = create_block('bool', [5]) + self.int_block = create_block('int', [6]) def test_constructor(self): - int32block = get_int_ex(['a'],dtype = np.int32) + int32block = create_block('i4', [0]) self.assertEqual(int32block.dtype, np.int32) def test_pickle(self): @@ -115,8 +186,8 @@ def _check(blk): _check(self.oblock) _check(self.bool_block) - def test_ref_locs(self): - assert_almost_equal(self.fblock.ref_locs, [0, 2, 4]) + def test_mgr_locs(self): + assert_almost_equal(self.fblock.mgr_locs, [0, 2, 4]) def test_attrs(self): self.assertEqual(self.fblock.shape, self.fblock.values.shape) @@ -127,16 +198,16 @@ def test_merge(self): avals = randn(2, 10) bvals = randn(2, 10) - ref_cols = ['e', 'a', 'b', 'd', 'f'] + ref_cols = Index(['e', 'a', 'b', 'd', 'f']) - ablock = make_block(avals, ['e', 'b'], ref_cols) - bblock = make_block(bvals, ['a', 'd'], ref_cols) + ablock = make_block(avals, + ref_cols.get_indexer(['e', 'b'])) + bblock = make_block(bvals, + ref_cols.get_indexer(['a', 'd'])) merged = ablock.merge(bblock) - exvals = np.vstack((avals, bvals)) - excols = ['e', 'b', 'a', 'd'] - eblock = make_block(exvals, excols, ref_cols) - eblock = eblock.reindex_items_from(ref_cols) - assert_block_equal(merged, eblock) + assert_almost_equal(merged.mgr_locs, [0, 1, 2, 3]) + assert_almost_equal(merged.values[[0, 2]], avals) + assert_almost_equal(merged.values[[1, 3]], bvals) # TODO: merge with mixed type? 
@@ -145,29 +216,9 @@ def test_copy(self): self.assertIsNot(cop, self.fblock) assert_block_equal(self.fblock, cop) - def test_items(self): - cols = self.fblock.items - self.assert_numpy_array_equal(cols, ['a', 'c', 'e']) - - cols2 = self.fblock.items - self.assertIs(cols, cols2) - - def test_assign_ref_items(self): - new_cols = Index(['foo', 'bar', 'baz', 'quux', 'hi']) - self.fblock.set_ref_items(new_cols) - self.assert_numpy_array_equal(self.fblock.items, ['foo', 'baz', 'hi']) - def test_reindex_index(self): pass - def test_reindex_items_from(self): - new_cols = Index(['e', 'b', 'c', 'f']) - reindexed = self.fblock.reindex_items_from(new_cols) - assert_almost_equal(reindexed.ref_locs, [0, 2]) - self.assertEquals(reindexed.values.shape[0], 2) - self.assert_((reindexed.values[0] == 2).all()) - self.assert_((reindexed.values[1] == 1).all()) - def test_reindex_cast(self): pass @@ -175,19 +226,23 @@ def test_insert(self): pass def test_delete(self): - newb = self.fblock.delete('a') - assert_almost_equal(newb.ref_locs, [2, 4]) + newb = self.fblock.copy() + newb.delete(0) + assert_almost_equal(newb.mgr_locs, [2, 4]) self.assert_((newb.values[0] == 1).all()) - newb = self.fblock.delete('c') - assert_almost_equal(newb.ref_locs, [0, 4]) + newb = self.fblock.copy() + newb.delete(1) + assert_almost_equal(newb.mgr_locs, [0, 4]) self.assert_((newb.values[1] == 2).all()) - newb = self.fblock.delete('e') - assert_almost_equal(newb.ref_locs, [0, 2]) + newb = self.fblock.copy() + newb.delete(2) + assert_almost_equal(newb.mgr_locs, [0, 2]) self.assert_((newb.values[1] == 1).all()) - self.assertRaises(Exception, self.fblock.delete, 'b') + newb = self.fblock.copy() + self.assertRaises(Exception, newb.delete, 3) def test_split_block_at(self): @@ -212,13 +267,6 @@ def test_split_block_at(self): bs = list(bblock.split_block_at('f')) self.assertEqual(len(bs), 0) - def test_unicode_repr(self): - mat = np.empty((N, 2), dtype=object) - mat[:, 0] = 'foo' - mat[:, 1] = 'bar' - cols = ['b', 
u("\u05d0")] - str_repr = repr(make_block(mat.T, cols, TEST_COLS)) - def test_get(self): pass @@ -233,76 +281,52 @@ def test_repr(self): class TestBlockManager(tm.TestCase): - _multiprocess_can_split_ = True def setUp(self): - self.blocks = [get_float_ex(), - get_obj_ex(), - get_bool_ex(), - get_int_ex(), - get_complex_ex()] - - all_items = [b.items for b in self.blocks] - - items = sorted(all_items[0].append(all_items[1:])) - items = Index(items) - for b in self.blocks: - b.ref_items = items - - self.mgr = BlockManager(self.blocks, [items, np.arange(N)]) + self.mgr = create_mgr('a: f8; b: object; c: f8; d: object; e: f8;' + 'f: bool; g: i8; h: complex') def test_constructor_corner(self): pass def test_attrs(self): - self.assertEquals(self.mgr.nblocks, len(self.mgr.blocks)) - self.assertEquals(len(self.mgr), len(self.mgr.items)) + mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2') + self.assertEquals(mgr.nblocks, 2) + self.assertEquals(len(mgr), 6) def test_is_mixed_dtype(self): - self.assertTrue(self.mgr.is_mixed_type) + self.assertFalse(create_mgr('a,b:f8').is_mixed_type) + self.assertFalse(create_mgr('a:f8-1; b:f8-2').is_mixed_type) - mgr = create_blockmanager([get_bool_ex(['a']), get_bool_ex(['b'])]) - self.assertFalse(mgr.is_mixed_type) + self.assertTrue(create_mgr('a,b:f8; c,d: f4').is_mixed_type) + self.assertTrue(create_mgr('a,b:f8; c,d: object').is_mixed_type) def test_is_indexed_like(self): - self.assertTrue(self.mgr._is_indexed_like(self.mgr)) - mgr2 = self.mgr.reindex_axis(np.arange(N - 1), axis=1) - self.assertFalse(self.mgr._is_indexed_like(mgr2)) - - def test_block_id_vector_item_dtypes(self): - expected = [0, 1, 0, 1, 0, 2, 3, 4] - result = self.mgr.block_id_vector - assert_almost_equal(expected, result) - - result = self.mgr.item_dtypes - - # as the platform may not exactly match this, pseudo match - expected = ['float64', 'object', 'float64', 'object', 'float64', - 'bool', 'int64', 'complex128'] - for e, r in zip(expected, result): - np.dtype(e).kind == 
np.dtype(r).kind - - def test_duplicate_item_failure(self): - items = Index(['a', 'a']) - blocks = [get_bool_ex(['a']), get_float_ex(['a'])] - for b in blocks: - b.ref_items = items - - # test trying to create _ref_locs with/o ref_locs set on the blocks - self.assertRaises(AssertionError, BlockManager, blocks, [items, np.arange(N)]) - - blocks[0].set_ref_locs([0]) - blocks[1].set_ref_locs([1]) - mgr = BlockManager(blocks, [items, np.arange(N)]) - mgr.iget(1) + mgr1 = create_mgr('a,b: f8') + mgr2 = create_mgr('a:i8; b:bool') + mgr3 = create_mgr('a,b,c: f8') + self.assertTrue(mgr1._is_indexed_like(mgr1)) + self.assertTrue(mgr1._is_indexed_like(mgr2)) + self.assertTrue(mgr1._is_indexed_like(mgr3)) + + self.assertFalse(mgr1._is_indexed_like( + mgr1.get_slice(slice(-1), axis=1))) + + def test_duplicate_ref_loc_failure(self): + tmp_mgr = create_mgr('a:bool; a: f8') + + axes, blocks = tmp_mgr.axes, tmp_mgr.blocks - # invalidate the _ref_locs - for b in blocks: - b._ref_locs = None - mgr._ref_locs = None - mgr._items_map = None - self.assertRaises(AssertionError, mgr._set_ref_locs, do_refs=True) + blocks[0].mgr_locs = np.array([0]) + blocks[1].mgr_locs = np.array([0]) + # test trying to create block manager with overlapping ref locs + self.assertRaises(AssertionError, BlockManager, blocks, axes) + + blocks[0].mgr_locs = np.array([0]) + blocks[1].mgr_locs = np.array([1]) + mgr = BlockManager(blocks, axes) + mgr.iget(1) def test_contains(self): self.assertIn('a', self.mgr) @@ -318,7 +342,7 @@ def test_pickle(self): assert_frame_equal(DataFrame(self.mgr), DataFrame(mgr2)) # share ref_items - self.assertIs(mgr2.blocks[0].ref_items, mgr2.blocks[1].ref_items) + # self.assertIs(mgr2.blocks[0].ref_items, mgr2.blocks[1].ref_items) # GH2431 self.assertTrue(hasattr(mgr2, "_is_consolidated")) @@ -328,9 +352,6 @@ def test_pickle(self): self.assertFalse(mgr2._is_consolidated) self.assertFalse(mgr2._known_consolidated) - def test_get(self): - pass - def test_get_scalar(self): for item in 
self.mgr.items: for i, index in enumerate(self.mgr.axes[1]): @@ -338,8 +359,35 @@ def test_get_scalar(self): exp = self.mgr.get(item)[i] assert_almost_equal(res, exp) + def test_get(self): + cols = Index(list('abc')) + values = np.random.rand(3, 3) + block = make_block(values=values.copy(), + placement=np.arange(3)) + mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)]) + + assert_almost_equal(mgr.get('a'), values[0]) + assert_almost_equal(mgr.get('b'), values[1]) + assert_almost_equal(mgr.get('c'), values[2]) + def test_set(self): - pass + mgr = create_mgr('a,b,c: int', item_shape=(3,)) + + mgr.set('d', np.array(['foo'] * 3)) + mgr.set('b', np.array(['bar'] * 3)) + assert_almost_equal(mgr.get('a'), [0] * 3) + assert_almost_equal(mgr.get('b'), ['bar'] * 3) + assert_almost_equal(mgr.get('c'), [2] * 3) + assert_almost_equal(mgr.get('d'), ['foo'] * 3) + + def test_insert(self): + self.mgr.insert(0, 'inserted', np.arange(N)) + + self.assertEqual(self.mgr.items[0], 'inserted') + assert_almost_equal(self.mgr.get('inserted'), np.arange(N)) + + for blk in self.mgr.blocks: + yield self.assertIs, self.mgr.items, blk.ref_items def test_set_change_dtype(self): self.mgr.set('baz', np.zeros(N, dtype=bool)) @@ -370,58 +418,68 @@ def test_copy(self): self.assertTrue(found) def test_sparse(self): - mgr = create_blockmanager([get_sparse_ex1(),get_sparse_ex2()]) + mgr = create_mgr('a: sparse-1; b: sparse-2') # what to test here? self.assertEqual(mgr.as_matrix().dtype, np.float64) def test_sparse_mixed(self): - mgr = create_blockmanager([get_sparse_ex1(),get_sparse_ex2(),get_float_ex()]) + mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8') self.assertEqual(len(mgr.blocks), 3) self.assertIsInstance(mgr, BlockManager) # what to test here? 
def test_as_matrix_float(self): - - mgr = create_blockmanager([get_float_ex(['c'],np.float32), get_float_ex(['d'],np.float16), get_float_ex(['e'],np.float64)]) + mgr = create_mgr('c: f4; d: f2; e: f8') self.assertEqual(mgr.as_matrix().dtype, np.float64) - mgr = create_blockmanager([get_float_ex(['c'],np.float32), get_float_ex(['d'],np.float16)]) + mgr = create_mgr('c: f4; d: f2') self.assertEqual(mgr.as_matrix().dtype, np.float32) def test_as_matrix_int_bool(self): - - mgr = create_blockmanager([get_bool_ex(['a']), get_bool_ex(['b'])]) + mgr = create_mgr('a: bool-1; b: bool-2') self.assertEqual(mgr.as_matrix().dtype, np.bool_) - mgr = create_blockmanager([get_int_ex(['a'],np.int64), get_int_ex(['b'],np.int64), get_int_ex(['c'],np.int32), get_int_ex(['d'],np.int16), get_int_ex(['e'],np.uint8) ]) + mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1') self.assertEqual(mgr.as_matrix().dtype, np.int64) - mgr = create_blockmanager([get_int_ex(['c'],np.int32), get_int_ex(['d'],np.int16), get_int_ex(['e'],np.uint8) ]) + mgr = create_mgr('c: i4; d: i2; e: u1') self.assertEqual(mgr.as_matrix().dtype, np.int32) def test_as_matrix_datetime(self): - mgr = create_blockmanager([get_dt_ex(['h']), get_dt_ex(['g'])]) + mgr = create_mgr('h: datetime-1; g: datetime-2') self.assertEqual(mgr.as_matrix().dtype, 'M8[ns]') def test_astype(self): - # coerce all - mgr = create_blockmanager([get_float_ex(['c'],np.float32), get_float_ex(['d'],np.float16), get_float_ex(['e'],np.float64)]) - - for t in ['float16','float32','float64','int32','int64']: + mgr = create_mgr('c: f4; d: f2; e: f8') + for t in ['float16', 'float32', 'float64', 'int32', 'int64']: + t = np.dtype(t) tmgr = mgr.astype(t) - self.assertEqual(tmgr.as_matrix().dtype, np.dtype(t)) + self.assertEqual(tmgr.get('c').dtype.type, t) + self.assertEqual(tmgr.get('d').dtype.type, t) + self.assertEqual(tmgr.get('e').dtype.type, t) # mixed - mgr = 
create_blockmanager([get_obj_ex(['a','b']),get_bool_ex(['c']),get_dt_ex(['d']),get_float_ex(['e'],np.float32), get_float_ex(['f'],np.float16), get_float_ex(['g'],np.float64)]) - for t in ['float16','float32','float64','int32','int64']: - tmgr = mgr.astype(t, raise_on_error = False).get_numeric_data() - self.assertEqual(tmgr.as_matrix().dtype, np.dtype(t)) + mgr = create_mgr('a,b: object; c: bool; d: datetime;' + 'e: f4; f: f2; g: f8') + for t in ['float16', 'float32', 'float64', 'int32', 'int64']: + t = np.dtype(t) + tmgr = mgr.astype(t, raise_on_error=False) + self.assertEqual(tmgr.get('c').dtype.type, t) + self.assertEqual(tmgr.get('e').dtype.type, t) + self.assertEqual(tmgr.get('f').dtype.type, t) + self.assertEqual(tmgr.get('g').dtype.type, t) + + self.assertEqual(tmgr.get('a').dtype.type, np.object_) + self.assertEqual(tmgr.get('b').dtype.type, np.object_) + if t != np.int64: + self.assertEqual(tmgr.get('d').dtype.type, np.datetime64) + else: + self.assertEqual(tmgr.get('d').dtype.type, t) def test_convert(self): - def _compare(old_mgr, new_mgr): """ compare the blocks, numeric compare ==, object don't """ old_blocks = set(old_mgr.blocks) @@ -446,45 +504,41 @@ def _compare(old_mgr, new_mgr): self.assertTrue(found) # noops - mgr = create_blockmanager([get_int_ex(['f']), get_float_ex(['g'])]) + mgr = create_mgr('f: i8; g: f8') new_mgr = mgr.convert() _compare(mgr,new_mgr) - mgr = create_blockmanager([get_obj_ex(['a','b']), get_int_ex(['f']), get_float_ex(['g'])]) + mgr = create_mgr('a, b: object; f: i8; g: f8') new_mgr = mgr.convert() _compare(mgr,new_mgr) - # there could atcually be multiple dtypes resulting - def _check(new_mgr,block_type, citems): - items = set() - for b in new_mgr.blocks: - if isinstance(b,block_type): - for i in list(b.items): - items.add(i) - self.assertEqual(items, set(citems)) - # convert - mat = np.empty((N, 3), dtype=object) - mat[:, 0] = '1' - mat[:, 1] = '2.' 
- mat[:, 2] = 'foo' - b = make_block(mat.T, ['a','b','foo'], TEST_COLS) - - mgr = create_blockmanager([b, get_int_ex(['f']), get_float_ex(['g'])]) - new_mgr = mgr.convert(convert_numeric = True) - - _check(new_mgr,FloatBlock,['b','g']) - _check(new_mgr,IntBlock,['a','f']) - - mgr = create_blockmanager([b, get_int_ex(['f'],np.int32), get_bool_ex(['bool']), get_dt_ex(['dt']), - get_int_ex(['i'],np.int64), get_float_ex(['g'],np.float64), get_float_ex(['h'],np.float16)]) - new_mgr = mgr.convert(convert_numeric = True) - - _check(new_mgr,FloatBlock,['b','g','h']) - _check(new_mgr,IntBlock,['a','f','i']) - _check(new_mgr,ObjectBlock,['foo']) - _check(new_mgr,BoolBlock,['bool']) - _check(new_mgr,DatetimeBlock,['dt']) + mgr = create_mgr('a,b,foo: object; f: i8; g: f8') + mgr.set('a', np.array(['1'] * N, dtype=np.object_)) + mgr.set('b', np.array(['2.'] * N, dtype=np.object_)) + mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_)) + new_mgr = mgr.convert(convert_numeric=True) + self.assertEquals(new_mgr.get('a').dtype.type, np.int64) + self.assertEquals(new_mgr.get('b').dtype.type, np.float64) + self.assertEquals(new_mgr.get('foo').dtype.type, np.object_) + self.assertEquals(new_mgr.get('f').dtype.type, np.int64) + self.assertEquals(new_mgr.get('g').dtype.type, np.float64) + + mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;' + 'i: i8; g: f8; h: f2') + mgr.set('a', np.array(['1'] * N, dtype=np.object_)) + mgr.set('b', np.array(['2.'] * N, dtype=np.object_)) + mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_)) + new_mgr = mgr.convert(convert_numeric=True) + self.assertEquals(new_mgr.get('a').dtype.type, np.int64) + self.assertEquals(new_mgr.get('b').dtype.type, np.float64) + self.assertEquals(new_mgr.get('foo').dtype.type, np.object_) + self.assertEquals(new_mgr.get('f').dtype.type, np.int32) + self.assertEquals(new_mgr.get('bool').dtype.type, np.bool_) + self.assertEquals(new_mgr.get('dt').dtype.type, np.datetime64) + 
self.assertEquals(new_mgr.get('i').dtype.type, np.int64) + self.assertEquals(new_mgr.get('g').dtype.type, np.float64) + self.assertEquals(new_mgr.get('h').dtype.type, np.float16) def test_interleave(self): pass @@ -512,69 +566,79 @@ def test_consolidate_ordering_issues(self): cons = self.mgr.consolidate() self.assertEquals(cons.nblocks, 1) - self.assertTrue(cons.blocks[0].items.equals(cons.items)) + assert_almost_equal(cons.blocks[0].mgr_locs, + np.arange(len(cons.items))) def test_reindex_index(self): pass def test_reindex_items(self): - def _check_cols(before, after, cols): - for col in cols: - assert_almost_equal(after.get(col), before.get(col)) - - # not consolidated - vals = randn(N) - self.mgr.set('g', vals) - reindexed = self.mgr.reindex_items(['g', 'c', 'a', 'd']) + # mgr is not consolidated, f8 & f8-2 blocks + mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;' + 'f: bool; g: f8-2') + + reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0) self.assertEquals(reindexed.nblocks, 2) - assert_almost_equal(reindexed.get('g'), vals.squeeze()) - _check_cols(self.mgr, reindexed, ['c', 'a', 'd']) + assert_almost_equal(reindexed.items, ['g', 'c', 'a', 'd']) + assert_almost_equal(mgr.get('g'), reindexed.get('g')) + assert_almost_equal(mgr.get('c'), reindexed.get('c')) + assert_almost_equal(mgr.get('a'), reindexed.get('a')) + assert_almost_equal(mgr.get('d'), reindexed.get('d')) + + def test_multiindex_xs(self): + mgr = create_mgr('a,b,c: f8; d,e,f: i8') - def test_xs(self): index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['first', 'second']) - self.mgr.set_axis(1, index) + mgr.set_axis(1, index) + result = mgr.xs('bar', axis=1) + self.assertEqual(result.shape, (6, 2)) + self.assertEqual(result.axes[1][0], ('bar', 'one')) + self.assertEqual(result.axes[1][1], ('bar', 'two')) - result = self.mgr.xs('bar', axis=1) - expected = 
self.mgr.get_slice(slice(3, 5), axis=1) + def test_get_numeric_data(self): + mgr = create_mgr('int: int; float: float; complex: complex;' + 'str: object; bool: bool; obj: object; dt: datetime', + item_shape=(3,)) + mgr.set('obj', np.array([1, 2, 3], dtype=np.object_)) - assert_frame_equal(DataFrame(result), DataFrame(expected)) + numeric = mgr.get_numeric_data() + assert_almost_equal(numeric.items, ['int', 'float', 'complex', 'bool']) + assert_almost_equal(mgr.get('float'), numeric.get('float')) - def test_get_numeric_data(self): - int_ser = Series(np.array([0, 1, 2])) - float_ser = Series(np.array([0., 1., 2.])) - complex_ser = Series(np.array([0j, 1j, 2j])) - str_ser = Series(np.array(['a', 'b', 'c'])) - bool_ser = Series(np.array([True, False, True])) - obj_ser = Series(np.array([1, 'a', 5])) - dt_ser = Series(tm.makeDateIndex(3)) - # check types - df = DataFrame({'int': int_ser, 'float': float_ser, - 'complex': complex_ser, 'str': str_ser, - 'bool': bool_ser, 'obj': obj_ser, - 'dt': dt_ser}) - xp = DataFrame({'int': int_ser, 'float': float_ser, - 'complex': complex_ser, 'bool': bool_ser}) - rs = DataFrame(df._data.get_numeric_data()) - assert_frame_equal(xp, rs) - - xp = DataFrame({'bool': bool_ser}) - rs = DataFrame(df._data.get_bool_data()) - assert_frame_equal(xp, rs) - - rs = DataFrame(df._data.get_bool_data()) - df.ix[0, 'bool'] = not df.ix[0, 'bool'] - - self.assertEqual(rs.ix[0, 'bool'], df.ix[0, 'bool']) - - rs = DataFrame(df._data.get_bool_data(copy=True)) - df.ix[0, 'bool'] = not df.ix[0, 'bool'] - - self.assertEqual(rs.ix[0, 'bool'], not df.ix[0, 'bool']) + # Check sharing + numeric.set('float', np.array([100., 200., 300.])) + assert_almost_equal(mgr.get('float'), np.array([100., 200., 300.])) + + numeric2 = mgr.get_numeric_data(copy=True) + assert_almost_equal(numeric.items, ['int', 'float', 'complex', 'bool']) + numeric2.set('float', np.array([1000., 2000., 3000.])) + assert_almost_equal(mgr.get('float'), np.array([100., 200., 300.])) + + def 
test_get_bool_data(self): + mgr = create_mgr('int: int; float: float; complex: complex;' + 'str: object; bool: bool; obj: object; dt: datetime', + item_shape=(3,)) + mgr.set('obj', np.array([True, False, True], dtype=np.object_)) + + bools = mgr.get_bool_data() + assert_almost_equal(bools.items, ['bool']) + assert_almost_equal(mgr.get('bool'), bools.get('bool')) + + bools.set('bool', np.array([True, False, True])) + assert_almost_equal(mgr.get('bool'), [True, False, True]) + + # Check sharing + bools2 = mgr.get_bool_data(copy=True) + bools2.set('bool', np.array([False, True, False])) + assert_almost_equal(mgr.get('bool'), [True, False, True]) + + def test_unicode_repr_doesnt_raise(self): + str_repr = repr(create_mgr(u('b,\u05d0: object'))) def test_missing_unicode_key(self): df = DataFrame({"a": [1]}) @@ -585,26 +649,342 @@ def test_missing_unicode_key(self): def test_equals(self): # unique items - index = Index(list('abcdef')) - block1 = make_block(np.arange(12).reshape(3,4), list('abc'), index) - block2 = make_block(np.arange(12).reshape(3,4)*10, list('def'), index) - block1.ref_items = block2.ref_items = index - bm1 = BlockManager([block1, block2], [index, np.arange(block1.shape[1])]) - bm2 = BlockManager([block2, block1], [index, np.arange(block1.shape[1])]) + bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2') + bm2 = BlockManager(bm1.blocks[::-1], bm1.axes) self.assertTrue(bm1.equals(bm2)) - # non-unique items - index = Index(list('aaabbb')) - block1 = make_block(np.arange(12).reshape(3,4), list('aaa'), index, - placement=[0,1,2]) - block2 = make_block(np.arange(12).reshape(3,4)*10, list('bbb'), index, - placement=[3,4,5]) - block1.ref_items = block2.ref_items = index - bm1 = BlockManager([block1, block2], [index, np.arange(block1.shape[1])]) - bm2 = BlockManager([block2, block1], [index, np.arange(block1.shape[1])]) + bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2') + bm2 = BlockManager(bm1.blocks[::-1], bm1.axes) self.assertTrue(bm1.equals(bm2)) + def 
test_single_mgr_ctor(self): + mgr = create_single_mgr('f8', num_rows=5) + self.assertEquals(mgr.as_matrix().tolist(), [0., 1., 2., 3., 4.]) + + +class TestIndexing(object): + # Nosetests-style data-driven tests. + # + # This test applies different indexing routines to block managers and + # compares the outcome to the result of same operations on np.ndarray. + # + # NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests + # and are disabled. + + MANAGERS = [ + create_single_mgr('f8', N), + create_single_mgr('i8', N), + #create_single_mgr('sparse', N), + create_single_mgr('sparse_na', N), + + # 2-dim + create_mgr('a,b,c,d,e,f: f8', item_shape=(N,)), + create_mgr('a,b,c,d,e,f: i8', item_shape=(N,)), + create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N,)), + create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N,)), + #create_mgr('a: sparse', item_shape=(N,)), + create_mgr('a: sparse_na', item_shape=(N,)), + + # 3-dim + create_mgr('a,b,c,d,e,f: f8', item_shape=(N, N)), + create_mgr('a,b,c,d,e,f: i8', item_shape=(N, N)), + create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N, N)), + create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N, N)), + # create_mgr('a: sparse', item_shape=(1, N)), + ] + + # MANAGERS = [MANAGERS[6]] + + def test_get_slice(self): + def assert_slice_ok(mgr, axis, slobj): + # import pudb; pudb.set_trace() + mat = mgr.as_matrix() + sliced = mgr.get_slice(slobj, axis=axis) + mat_slobj = (slice(None),) * axis + (slobj,) + assert_almost_equal(mat[mat_slobj], sliced.as_matrix()) + assert_almost_equal(mgr.axes[axis][slobj], sliced.axes[axis]) + + for mgr in self.MANAGERS: + for ax in range(mgr.ndim): + # slice + yield assert_slice_ok, mgr, ax, slice(None) + yield assert_slice_ok, mgr, ax, slice(3) + yield assert_slice_ok, mgr, ax, slice(100) + yield assert_slice_ok, mgr, ax, slice(1, 4) + yield assert_slice_ok, mgr, ax, slice(3, 0, -2) + + # boolean mask + yield assert_slice_ok, mgr, ax, np.array([], dtype=np.bool_) + yield 
(assert_slice_ok, mgr, ax, + np.ones(mgr.shape[ax], dtype=np.bool_)) + yield (assert_slice_ok, mgr, ax, + np.zeros(mgr.shape[ax], dtype=np.bool_)) + + if mgr.shape[ax] >= 3: + yield (assert_slice_ok, mgr, ax, + np.arange(mgr.shape[ax]) % 3 == 0) + yield (assert_slice_ok, mgr, ax, + np.array([True, True, False], dtype=np.bool_)) + + # fancy indexer + yield assert_slice_ok, mgr, ax, [] + yield assert_slice_ok, mgr, ax, lrange(mgr.shape[ax]) + + if mgr.shape[ax] >= 3: + yield assert_slice_ok, mgr, ax, [0, 1, 2] + yield assert_slice_ok, mgr, ax, [-1, -2, -3] + + def test_take(self): + def assert_take_ok(mgr, axis, indexer): + mat = mgr.as_matrix() + taken = mgr.take(indexer, axis) + assert_almost_equal(np.take(mat, indexer, axis), + taken.as_matrix()) + assert_almost_equal(mgr.axes[axis].take(indexer), + taken.axes[axis]) + + for mgr in self.MANAGERS: + for ax in range(mgr.ndim): + # take/fancy indexer + yield assert_take_ok, mgr, ax, [] + yield assert_take_ok, mgr, ax, [0, 0, 0] + yield assert_take_ok, mgr, ax, lrange(mgr.shape[ax]) + + if mgr.shape[ax] >= 3: + yield assert_take_ok, mgr, ax, [0, 1, 2] + yield assert_take_ok, mgr, ax, [-1, -2, -3] + + def test_reindex_axis(self): + def assert_reindex_axis_is_ok(mgr, axis, new_labels, + fill_value): + mat = mgr.as_matrix() + indexer = mgr.axes[axis].get_indexer_for(new_labels) + + reindexed = mgr.reindex_axis(new_labels, axis, + fill_value=fill_value) + assert_almost_equal(com.take_nd(mat, indexer, axis, + fill_value=fill_value), + reindexed.as_matrix()) + assert_almost_equal(reindexed.axes[axis], new_labels) + + for mgr in self.MANAGERS: + for ax in range(mgr.ndim): + for fill_value in (None, np.nan, 100.): + yield assert_reindex_axis_is_ok, mgr, ax, [], fill_value + yield (assert_reindex_axis_is_ok, mgr, ax, + mgr.axes[ax], fill_value) + yield (assert_reindex_axis_is_ok, mgr, ax, + mgr.axes[ax][[0, 0, 0]], fill_value) + yield (assert_reindex_axis_is_ok, mgr, ax, + ['foo', 'bar', 'baz'], fill_value) + yield 
(assert_reindex_axis_is_ok, mgr, ax, + ['foo', mgr.axes[ax][0], 'baz'], fill_value) + + if mgr.shape[ax] >= 3: + yield (assert_reindex_axis_is_ok, mgr, ax, + mgr.axes[ax][:-3], fill_value) + yield (assert_reindex_axis_is_ok, mgr, ax, + mgr.axes[ax][-3::-1], fill_value) + yield (assert_reindex_axis_is_ok, mgr, ax, + mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value) + + def test_reindex_indexer(self): + def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, + fill_value): + mat = mgr.as_matrix() + reindexed_mat = com.take_nd(mat, indexer, axis, + fill_value=fill_value) + reindexed = mgr.reindex_indexer(new_labels, indexer, axis, + fill_value=fill_value) + assert_almost_equal(reindexed_mat, reindexed.as_matrix()) + assert_almost_equal(reindexed.axes[axis], new_labels) + + for mgr in self.MANAGERS: + for ax in range(mgr.ndim): + for fill_value in (None, np.nan, 100.): + yield (assert_reindex_indexer_is_ok, mgr, ax, + [], [], fill_value) + yield (assert_reindex_indexer_is_ok, mgr, ax, + mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value) + yield (assert_reindex_indexer_is_ok, mgr, ax, + ['foo'] * mgr.shape[ax], np.arange(mgr.shape[ax]), + fill_value) + + yield (assert_reindex_indexer_is_ok, mgr, ax, + mgr.axes[ax][::-1], np.arange(mgr.shape[ax]), + fill_value) + yield (assert_reindex_indexer_is_ok, mgr, ax, + mgr.axes[ax], np.arange(mgr.shape[ax])[::-1], + fill_value) + yield (assert_reindex_indexer_is_ok, mgr, ax, + ['foo', 'bar', 'baz'], [0, 0, 0], fill_value) + yield (assert_reindex_indexer_is_ok, mgr, ax, + ['foo', 'bar', 'baz'], [-1, 0, -1], fill_value) + yield (assert_reindex_indexer_is_ok, mgr, ax, + ['foo', mgr.axes[ax][0], 'baz'], [-1, -1, -1], + fill_value) + + if mgr.shape[ax] >= 3: + yield (assert_reindex_indexer_is_ok, mgr, ax, + ['foo', 'bar', 'baz'], [0, 1, 2], fill_value) + + + # test_get_slice(slice_like, axis) + # take(indexer, axis) + # reindex_axis(new_labels, axis) + # reindex_indexer(new_labels, indexer, axis) + + + + +class 
TestBlockPlacement(tm.TestCase): + _multiprocess_can_split_ = True + + def test_slice_len(self): + self.assertEquals(len(BlockPlacement(slice(0, 4))), 4) + self.assertEquals(len(BlockPlacement(slice(0, 4, 2))), 2) + self.assertEquals(len(BlockPlacement(slice(0, 3, 2))), 2) + + self.assertEquals(len(BlockPlacement(slice(0, 1, 2))), 1) + self.assertEquals(len(BlockPlacement(slice(1, 0, -1))), 1) + + def test_zero_step_raises(self): + self.assertRaises(ValueError, BlockPlacement, slice(1, 1, 0)) + self.assertRaises(ValueError, BlockPlacement, slice(1, 2, 0)) + + def test_unbounded_slice_raises(self): + def assert_unbounded_slice_error(slc): + # assertRaisesRegexp is not available in py2.6 + # self.assertRaisesRegexp(ValueError, "unbounded slice", + # lambda: BlockPlacement(slc)) + self.assertRaises(ValueError, BlockPlacement, slc) + + assert_unbounded_slice_error(slice(None, None)) + assert_unbounded_slice_error(slice(10, None)) + assert_unbounded_slice_error(slice(None, None, -1)) + assert_unbounded_slice_error(slice(None, 10, -1)) + + # These are "unbounded" because negative index will change depending on + # container shape. 
+ assert_unbounded_slice_error(slice(-1, None)) + assert_unbounded_slice_error(slice(None, -1)) + assert_unbounded_slice_error(slice(-1, -1)) + assert_unbounded_slice_error(slice(-1, None, -1)) + assert_unbounded_slice_error(slice(None, -1, -1)) + assert_unbounded_slice_error(slice(-1, -1, -1)) + + def test_not_slice_like_slices(self): + def assert_not_slice_like(slc): + self.assertTrue(not BlockPlacement(slc).is_slice_like) + + assert_not_slice_like(slice(0, 0)) + assert_not_slice_like(slice(100, 0)) + + assert_not_slice_like(slice(100, 100, -1)) + assert_not_slice_like(slice(0, 100, -1)) + + self.assertTrue(not BlockPlacement(slice(0, 0)).is_slice_like) + self.assertTrue(not BlockPlacement(slice(100, 100)).is_slice_like) + + def test_array_to_slice_conversion(self): + def assert_as_slice_equals(arr, slc): + self.assertEquals(BlockPlacement(arr).as_slice, slc) + + assert_as_slice_equals([0], slice(0, 1, 1)) + assert_as_slice_equals([100], slice(100, 101, 1)) + + assert_as_slice_equals([0, 1, 2], slice(0, 3, 1)) + assert_as_slice_equals([0, 5, 10], slice(0, 15, 5)) + assert_as_slice_equals([0, 100], slice(0, 200, 100)) + + assert_as_slice_equals([2, 1], slice(2, 0, -1)) + assert_as_slice_equals([2, 1, 0], slice(2, None, -1)) + assert_as_slice_equals([100, 0], slice(100, None, -100)) + + def test_not_slice_like_arrays(self): + def assert_not_slice_like(arr): + self.assertTrue(not BlockPlacement(arr).is_slice_like) + + assert_not_slice_like([]) + assert_not_slice_like([-1]) + assert_not_slice_like([-1, -2, -3]) + assert_not_slice_like([-10]) + assert_not_slice_like([-1]) + assert_not_slice_like([-1, 0, 1, 2]) + assert_not_slice_like([-2, 0, 2, 4]) + assert_not_slice_like([1, 0, -1]) + assert_not_slice_like([1, 1, 1]) + + def test_slice_iter(self): + self.assertEquals(list(BlockPlacement(slice(0, 3))), [0, 1, 2]) + self.assertEquals(list(BlockPlacement(slice(0, 0))), []) + self.assertEquals(list(BlockPlacement(slice(3, 0))), []) + + 
self.assertEquals(list(BlockPlacement(slice(3, 0, -1))), [3, 2, 1]) + self.assertEquals(list(BlockPlacement(slice(3, None, -1))), + [3, 2, 1, 0]) + + def test_slice_to_array_conversion(self): + def assert_as_array_equals(slc, asarray): + np.testing.assert_array_equal( + BlockPlacement(slc).as_array, + np.asarray(asarray)) + + assert_as_array_equals(slice(0, 3), [0, 1, 2]) + assert_as_array_equals(slice(0, 0), []) + assert_as_array_equals(slice(3, 0), []) + + assert_as_array_equals(slice(3, 0, -1), [3, 2, 1]) + assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0]) + assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1]) + + def test_blockplacement_add(self): + bpl = BlockPlacement(slice(0, 5)) + self.assertEquals(bpl.add(1).as_slice, slice(1, 6, 1)) + self.assertEquals(bpl.add(np.arange(5)).as_slice, + slice(0, 10, 2)) + self.assertEquals(list(bpl.add(np.arange(5, 0, -1))), + [5, 5, 5, 5, 5]) + + def test_blockplacement_add_int(self): + def assert_add_equals(val, inc, result): + self.assertEquals(list(BlockPlacement(val).add(inc)), + result) + + assert_add_equals(slice(0, 0), 0, []) + assert_add_equals(slice(1, 4), 0, [1, 2, 3]) + assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1]) + assert_add_equals(slice(2, None, -1), 0, [2, 1, 0]) + assert_add_equals([1, 2, 4], 0, [1, 2, 4]) + + assert_add_equals(slice(0, 0), 10, []) + assert_add_equals(slice(1, 4), 10, [11, 12, 13]) + assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11]) + assert_add_equals(slice(2, None, -1), 10, [12, 11, 10]) + assert_add_equals([1, 2, 4], 10, [11, 12, 14]) + + assert_add_equals(slice(0, 0), -1, []) + assert_add_equals(slice(1, 4), -1, [0, 1, 2]) + assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0]) + assert_add_equals([1, 2, 4], -1, [0, 1, 3]) + + self.assertRaises(ValueError, + lambda: BlockPlacement(slice(1, 4)).add(-10)) + self.assertRaises(ValueError, + lambda: BlockPlacement([1, 2, 4]).add(-10)) + self.assertRaises(ValueError, + lambda: BlockPlacement(slice(2, None, -1)).add(-1)) + 
+ # def test_blockplacement_array_add(self): + + # assert_add_equals(slice(0, 2), [0, 1, 1], [0, 2, 3]) + # assert_add_equals(slice(2, None, -1), [1, 1, 0], [3, 2, 0]) + + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) + + + diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 1eb43237c3185..a6c2bb9f56602 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -611,7 +611,7 @@ def test_setitem_change_dtype(self): s = dft['foo', 'two'] dft['foo', 'two'] = s > s.median() assert_series_equal(dft['foo', 'two'], s > s.median()) - tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex) + # tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex) reindexed = dft.reindex(columns=[('foo', 'two')]) assert_series_equal(reindexed['foo', 'two'], s > s.median()) diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index 935dfb65a0807..d17e2e2dcb12b 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -14,12 +14,10 @@ from pandas.core.index import (Index, MultiIndex, _get_combined_index, _ensure_index, _get_consensus_names, _all_indexes_same) -from pandas.core.internals import (TimeDeltaBlock, IntBlock, BoolBlock, BlockManager, - make_block, _consolidate) -from pandas.util.decorators import cache_readonly, Appender, Substitution -from pandas.core.common import (PandasError, ABCSeries, - is_timedelta64_dtype, is_datetime64_dtype, - is_integer_dtype, isnull) +from pandas.core.internals import (items_overlap_with_suffix, + concatenate_block_managers) +from pandas.util.decorators import Appender, Substitution +from pandas.core.common import ABCSeries from pandas.io.parsers import TextFileReader import pandas.core.common as com @@ -27,7 +25,7 @@ import pandas.lib as lib import pandas.algos as algos import pandas.hashtable as _hash -import pandas.tslib as tslib + @Substitution('\nleft : DataFrame') @Appender(_merge_doc, 
indents=0) @@ -186,16 +184,20 @@ def __init__(self, left, right, how='inner', on=None, def get_result(self): join_index, left_indexer, right_indexer = self._get_join_info() - # this is a bit kludgy - ldata, rdata = self._get_merge_data() + ldata, rdata = self.left._data, self.right._data + lsuf, rsuf = self.suffixes + + llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf, + rdata.items, rsuf) + + lindexers = {1: left_indexer} if left_indexer is not None else {} + rindexers = {1: right_indexer} if right_indexer is not None else {} - # TODO: more efficiently handle group keys to avoid extra - # consolidation! - join_op = _BlockJoinOperation([ldata, rdata], join_index, - [left_indexer, right_indexer], axis=1, - copy=self.copy) + result_data = concatenate_block_managers( + [(ldata, lindexers), (rdata, rindexers)], + axes=[llabels.append(rlabels), join_index], + concat_axis=0, copy=self.copy) - result_data = join_op.get_result() result = DataFrame(result_data).__finalize__(self, method='merge') self._maybe_add_join_keys(result, left_indexer, right_indexer) @@ -281,8 +283,18 @@ def _get_merge_data(self): """ ldata, rdata = self.left._data, self.right._data lsuf, rsuf = self.suffixes - ldata, rdata = ldata._maybe_rename_join(rdata, lsuf, rsuf, - copydata=False) + + llabels, rlabels = items_overlap_with_suffix( + ldata.items, lsuf, rdata.items, rsuf) + + if not llabels.equals(ldata.items): + ldata = ldata.copy(deep=False) + ldata.set_axis(0, llabels) + + if not rlabels.equals(rdata.items): + rdata = rdata.copy(deep=False) + rdata.set_axis(0, rlabels) + return ldata, rdata def _get_merge_keys(self): @@ -410,14 +422,14 @@ def _validate_specification(self): if self.right_index: if len(self.left_on) != self.right.index.nlevels: raise ValueError('len(left_on) must equal the number ' - 'of levels in the index of "right"') + 'of levels in the index of "right"') self.right_on = [None] * n elif self.right_on is not None: n = len(self.right_on) if self.left_index: if 
len(self.right_on) != self.left.index.nlevels: raise ValueError('len(right_on) must equal the number ' - 'of levels in the index of "left"') + 'of levels in the index of "left"') self.left_on = [None] * n if len(self.right_on) != len(self.left_on): raise ValueError("len(right_on) must equal len(left_on)") @@ -487,7 +499,11 @@ def get_result(self): join_index, left_indexer, right_indexer = self._get_join_info() # this is a bit kludgy - ldata, rdata = self._get_merge_data() + ldata, rdata = self.left._data, self.right._data + lsuf, rsuf = self.suffixes + + llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf, + rdata.items, rsuf) if self.fill_method == 'ffill': left_join_indexer = algos.ffill_indexer(left_indexer) @@ -496,11 +512,14 @@ def get_result(self): left_join_indexer = left_indexer right_join_indexer = right_indexer - join_op = _BlockJoinOperation([ldata, rdata], join_index, - [left_join_indexer, right_join_indexer], - axis=1, copy=self.copy) + lindexers = {1: left_join_indexer} if left_join_indexer is not None else {} + rindexers = {1: right_join_indexer} if right_join_indexer is not None else {} + + result_data = concatenate_block_managers( + [(ldata, lindexers), (rdata, rindexers)], + axes=[llabels.append(rlabels), join_index], + concat_axis=0, copy=self.copy) - result_data = join_op.get_result() result = DataFrame(result_data) self._maybe_add_join_keys(result, left_indexer, right_indexer) @@ -640,238 +659,6 @@ def _sort_labels(uniques, left, right): return new_left, new_right -class _BlockJoinOperation(object): - """ - BlockJoinOperation made generic for N DataFrames - - Object responsible for orchestrating efficient join operation between two - BlockManager data structures - """ - def __init__(self, data_list, join_index, indexers, axis=1, copy=True): - if axis <= 0: # pragma: no cover - raise MergeError('Only axis >= 1 supported for this operation') - - if len(data_list) != len(indexers): - raise AssertionError("data_list and indexers must 
have the same " - "length") - - self.units = [] - for data, indexer in zip(data_list, indexers): - if not data.is_consolidated(): - data = data.consolidate() - data._set_ref_locs() - self.units.append(_JoinUnit(data.blocks, indexer)) - - self.join_index = join_index - self.axis = axis - self.copy = copy - self.offsets = None - - # do NOT sort - self.result_items = _concat_indexes([d.items for d in data_list]) - self.result_axes = list(data_list[0].axes) - self.result_axes[0] = self.result_items - self.result_axes[axis] = self.join_index - - def _prepare_blocks(self): - blockmaps = [] - - for unit in self.units: - join_blocks = unit.get_upcasted_blocks() - type_map = {} - for blk in join_blocks: - type_map.setdefault(blk.ftype, []).append(blk) - blockmaps.append((unit, type_map)) - - return blockmaps - - def get_result(self): - """ - Returns - ------- - merged : BlockManager - """ - blockmaps = self._prepare_blocks() - kinds = _get_merge_block_kinds(blockmaps) - - # maybe want to enable flexible copying <-- what did I mean? 
- kind_blocks = [] - for klass in kinds: - klass_blocks = [] - for unit, mapping in blockmaps: - if klass in mapping: - klass_blocks.extend((unit, b) for b in mapping[klass]) - - # blocks that we are going to merge - kind_blocks.append(klass_blocks) - - # create the merge offsets, essentially where the resultant blocks go in the result - if not self.result_items.is_unique: - - # length of the merges for each of the klass blocks - self.offsets = np.zeros(len(blockmaps)) - for kb in kind_blocks: - kl = list(b.get_merge_length() for unit, b in kb) - self.offsets += np.array(kl) - - # merge the blocks to create the result blocks - result_blocks = [] - for klass_blocks in kind_blocks: - res_blk = self._get_merged_block(klass_blocks) - result_blocks.append(res_blk) - - return BlockManager(result_blocks, self.result_axes) - - def _get_merged_block(self, to_merge): - if len(to_merge) > 1: - - # placement set here - return self._merge_blocks(to_merge) - else: - unit, block = to_merge[0] - blk = unit.reindex_block(block, self.axis, - self.result_items, copy=self.copy) - - # set placement / invalidate on a unique result - if self.result_items.is_unique and blk._ref_locs is not None: - if not self.copy: - blk = blk.copy() - blk.set_ref_locs(None) - - return blk - - - def _merge_blocks(self, merge_chunks): - """ - merge_chunks -> [(_JoinUnit, Block)] - """ - funit, fblock = merge_chunks[0] - fidx = funit.indexer - - out_shape = list(fblock.get_values().shape) - - n = len(fidx) if fidx is not None else out_shape[self.axis] - - merge_lengths = list(blk.get_merge_length() for unit, blk in merge_chunks) - out_shape[0] = sum(merge_lengths) - out_shape[self.axis] = n - - # Should use Fortran order?? 
- block_dtype = _get_block_dtype([x[1] for x in merge_chunks]) - out = np.empty(out_shape, dtype=block_dtype) - - sofar = 0 - for unit, blk in merge_chunks: - out_chunk = out[sofar: sofar + len(blk)] - com.take_nd(blk.get_values(), unit.indexer, self.axis, out=out_chunk) - sofar += len(blk) - - # does not sort - new_block_items = _concat_indexes([b.items for _, b in merge_chunks]) - - # need to set placement if we have a non-unique result - # calculate by the existing placement plus the offset in the result set - placement = None - if not self.result_items.is_unique: - placement = [] - offsets = np.append(np.array([0]),self.offsets.cumsum()[:-1]) - for (unit, blk), offset in zip(merge_chunks,offsets): - placement.extend(blk.ref_locs+offset) - - return make_block(out, new_block_items, self.result_items, placement=placement) - - -class _JoinUnit(object): - """ - Blocks plus indexer - """ - - def __init__(self, blocks, indexer): - self.blocks = blocks - self.indexer = indexer - - @cache_readonly - def mask_info(self): - if self.indexer is None or not _may_need_upcasting(self.blocks): - return None - else: - mask = self.indexer == -1 - needs_masking = mask.any() - return (mask, needs_masking) - - def get_upcasted_blocks(self): - # will short-circuit and not compute needs_masking if indexer is None - if self.mask_info is not None and self.mask_info[1]: - return _upcast_blocks(self.blocks) - return self.blocks - - def reindex_block(self, block, axis, ref_items, copy=True): - if self.indexer is None: - result = block.copy() if copy else block - else: - result = block.reindex_axis(self.indexer, axis=axis, - mask_info=self.mask_info) - result.ref_items = ref_items - return result - - -def _may_need_upcasting(blocks): - for block in blocks: - if isinstance(block, (IntBlock, BoolBlock)) and not isinstance(block, TimeDeltaBlock): - return True - return False - - -def _upcast_blocks(blocks): - """ - Upcast and consolidate if necessary - """ - new_blocks = [] - for block in 
blocks: - if isinstance(block, TimeDeltaBlock): - # these are int blocks underlying, but are ok - newb = block - elif isinstance(block, IntBlock): - newb = make_block(block.values.astype(float), block.items, - block.ref_items, placement=block._ref_locs) - elif isinstance(block, BoolBlock): - newb = make_block(block.values.astype(object), block.items, - block.ref_items, placement=block._ref_locs) - else: - newb = block - new_blocks.append(newb) - - # use any ref_items - return _consolidate(new_blocks, newb.ref_items) - - -def _get_all_block_kinds(blockmaps): - kinds = set() - for mapping in blockmaps: - kinds |= set(mapping) - return kinds - - -def _get_merge_block_kinds(blockmaps): - kinds = set() - for _, mapping in blockmaps: - kinds |= set(mapping) - return kinds - - -def _get_block_dtype(blocks): - if len(blocks) == 0: - return object - blk1 = blocks[0] - dtype = blk1.dtype - - if issubclass(dtype.type, np.floating): - for blk in blocks: - if blk.dtype.type == np.float64: - return blk.dtype - - return dtype - #---------------------------------------------------------------------- # Concatenate DataFrame objects @@ -1061,220 +848,38 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None, self.new_axes = self._get_new_axes() def get_result(self): - if self._is_series and self.axis == 0: - new_data = com._concat_compat([x.get_values() for x in self.objs]) - name = com._consensus_name_attr(self.objs) - new_data = self._post_merge(new_data) - return Series(new_data, index=self.new_axes[0], name=name).__finalize__(self, method='concat') - elif self._is_series: - data = dict(zip(range(len(self.objs)), self.objs)) - index, columns = self.new_axes - tmpdf = DataFrame(data, index=index) - if columns is not None: - tmpdf.columns = columns - return tmpdf.__finalize__(self, method='concat') + if self._is_series: + if self.axis == 0: + new_data = com._concat_compat([x.get_values() for x in self.objs]) + name = com._consensus_name_attr(self.objs) + return 
Series(new_data, index=self.new_axes[0], name=name).__finalize__(self, method='concat') + else: + data = dict(zip(range(len(self.objs)), self.objs)) + index, columns = self.new_axes + tmpdf = DataFrame(data, index=index) + if columns is not None: + tmpdf.columns = columns + return tmpdf.__finalize__(self, method='concat') else: - new_data = self._get_concatenated_data() - new_data = self._post_merge(new_data) - return self.objs[0]._from_axes(new_data, self.new_axes).__finalize__(self, method='concat') + mgrs_indexers = [] + for obj in self.objs: + mgr = obj._data + indexers = {} + for ax, new_labels in enumerate(self.new_axes): + if ax == self.axis: + # Suppress reindexing on concat axis + continue - def _post_merge(self, data): - if isinstance(data, BlockManager): - data = data.post_merge(self.objs) - return data - - def _get_fresh_axis(self): - return Index(np.arange(len(self._get_concat_axis()))) - - def _prepare_blocks(self): - reindexed_data = self._get_reindexed_data() - - # we are consolidating as we go, so just add the blocks, no-need for dtype mapping - blockmaps = [] - for data in reindexed_data: - data = data.consolidate() - data._set_ref_locs() - blockmaps.append(data.get_block_map(typ='dict')) - return blockmaps, reindexed_data - - def _get_concatenated_data(self): - # need to conform to same other (joined) axes for block join - blockmaps, rdata = self._prepare_blocks() - kinds = _get_all_block_kinds(blockmaps) - - try: - # need to conform to same other (joined) axes for block join - new_blocks = [] - for kind in kinds: - klass_blocks = [] - for mapping in blockmaps: - l = mapping.get(kind) - if l is None: - l = [ None ] - klass_blocks.extend(l) - stacked_block = self._concat_blocks(klass_blocks) - new_blocks.append(stacked_block) - - if self.axis == 0 and self.ignore_index: - self.new_axes[0] = self._get_fresh_axis() - - for blk in new_blocks: - blk.ref_items = self.new_axes[0] - - new_data = BlockManager(new_blocks, self.new_axes) - - # Eventual goal 
would be to move everything to PandasError or other explicit error - except (Exception, PandasError): # EAFP - - # should not be possible to fail here for the expected reason with - # axis = 0 - if self.axis == 0: # pragma: no cover - raise - - new_data = {} - for item in self.new_axes[0]: - new_data[item] = self._concat_single_item(rdata, item) - - return new_data - - def _get_reindexed_data(self): - # HACK: ugh - - reindexed_data = [] - axes_to_reindex = list(enumerate(self.new_axes)) - axes_to_reindex.pop(self.axis) - - for obj in self.objs: - data = obj._data.prepare_for_merge() - for i, ax in axes_to_reindex: - data = data.reindex_axis(ax, axis=i, copy=False) - reindexed_data.append(data) - - return reindexed_data - - def _concat_blocks(self, blocks): - - values_list = [b.get_values() for b in blocks if b is not None] - concat_values = com._concat_compat(values_list, axis=self.axis) - - if self.axis > 0: - # Not safe to remove this check, need to profile - if not _all_indexes_same([b.items for b in blocks]): - # TODO: Either profile this piece or remove. 
- # FIXME: Need to figure out how to test whether this line exists or does not...(unclear if even possible - # or maybe would require performance test) - raise PandasError('dtypes are not consistent throughout ' - 'DataFrames') - return make_block(concat_values, - blocks[0].items, - self.new_axes[0], - placement=blocks[0]._ref_locs) - else: + obj_labels = mgr.axes[ax] + if not new_labels.equals(obj_labels): + indexers[ax] = obj_labels.reindex(new_labels)[1] - offsets = np.r_[0, np.cumsum([len(x._data.axes[0]) for - x in self.objs])] - indexer = np.concatenate([offsets[i] + b.ref_locs - for i, b in enumerate(blocks) - if b is not None]) - if self.ignore_index: - concat_items = indexer - else: - concat_items = self.new_axes[0].take(indexer) - - if self.ignore_index: - ref_items = self._get_fresh_axis() - return make_block(concat_values, concat_items, ref_items) - - block = make_block(concat_values, concat_items, self.new_axes[0]) - - # we need to set the ref_locs in this block so we have the mapping - # as we now have a non-unique index across dtypes, and we need to - # map the column location to the block location - # GH3602 - if not self.new_axes[0].is_unique: - block.set_ref_locs(indexer) - - return block - - def _concat_single_item(self, objs, item): - # this is called if we don't have consistent dtypes in a row-wise append - all_values = [] - dtypes = [] - alls = set() - - # figure out the resulting dtype of the combination - for data, orig in zip(objs, self.objs): - d = dict([ (t,False) for t in ['object','datetime','timedelta','other'] ]) - if item in orig: - values = data.get(item) - if hasattr(values,'to_dense'): - values = values.to_dense() - all_values.append(values) - - dtype = values.dtype - - if issubclass(dtype.type, (np.object_, np.bool_)): - d['object'] = True - alls.add('object') - elif is_datetime64_dtype(dtype): - d['datetime'] = True - alls.add('datetime') - elif is_timedelta64_dtype(dtype): - d['timedelta'] = True - alls.add('timedelta') - else: 
- d['other'] = True - alls.add('other') + mgrs_indexers.append((obj._data, indexers)) - else: - all_values.append(None) - d['other'] = True - alls.add('other') - - dtypes.append(d) - - if 'datetime' in alls or 'timedelta' in alls: - - if 'object' in alls or 'other' in alls: - - for v, d in zip(all_values,dtypes): - if d.get('datetime') or d.get('timedelta'): - pass - - # if we have all null, then leave a date/time like type - # if we have only that type left - elif v is None or isnull(v).all(): - - alls.discard('other') - alls.discard('object') - - # create the result - if 'object' in alls: - empty_dtype, fill_value = np.object_, np.nan - elif 'other' in alls: - empty_dtype, fill_value = np.float64, np.nan - elif 'datetime' in alls: - empty_dtype, fill_value = 'M8[ns]', tslib.iNaT - elif 'timedelta' in alls: - empty_dtype, fill_value = 'm8[ns]', tslib.iNaT - else: # pragma - raise AssertionError("invalid dtype determination in concat_single_item") - - to_concat = [] - for obj, item_values in zip(objs, all_values): - if item_values is None or isnull(item_values).all(): - shape = obj.shape[1:] - missing_arr = np.empty(shape, dtype=empty_dtype) - missing_arr.fill(fill_value) - to_concat.append(missing_arr) - else: - to_concat.append(item_values) + new_data = concatenate_block_managers( + mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=True) - # this method only gets called with axis >= 1 - if self.axis < 1: - raise AssertionError("axis must be >= 1, input was" - " {0}".format(self.axis)) - return com._concat_compat(to_concat, axis=self.axis - 1) + return self.objs[0]._from_axes(new_data, self.new_axes).__finalize__(self, method='concat') def _get_result_dim(self): if self._is_series and self.axis == 1: @@ -1303,13 +908,7 @@ def _get_new_axes(self): for i, ax in zip(indices, self.join_axes): new_axes[i] = ax - if self.ignore_index: - concat_axis = None - else: - concat_axis = self._get_concat_axis() - - new_axes[self.axis] = concat_axis - + 
new_axes[self.axis] = self._get_concat_axis() return new_axes def _get_comb_axis(self, i): @@ -1325,9 +924,16 @@ def _get_comb_axis(self, i): return _get_combined_index(all_indexes, intersect=self.intersect) def _get_concat_axis(self): + """ + Return index to be used along concatenation axis. + """ if self._is_series: if self.axis == 0: indexes = [x.index for x in self.objs] + elif self.ignore_index: + idx = Index(np.arange(len(self.objs))) + idx.is_unique = True # arange is always unique + return idx elif self.keys is None: names = [] for x in self.objs: @@ -1338,13 +944,21 @@ def _get_concat_axis(self): if x.name is not None: names.append(x.name) else: - return Index(np.arange(len(self.objs))) + idx = Index(np.arange(len(self.objs))) + idx.is_unique = True + return idx + return Index(names) else: return _ensure_index(self.keys) else: indexes = [x._data.axes[self.axis] for x in self.objs] + if self.ignore_index: + idx = Index(np.arange(sum(len(i) for i in indexes))) + idx.is_unique = True + return idx + if self.keys is None: concat_axis = _concat_indexes(indexes) else: diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py index 146c244e7d775..8e11c78ecd135 100644 --- a/pandas/tools/tests/test_merge.py +++ b/pandas/tools/tests/test_merge.py @@ -584,6 +584,19 @@ def test_merge_different_column_key_names(self): assert_almost_equal(merged['value_x'], [2, 3, 1, 1, 4, 4, np.nan]) assert_almost_equal(merged['value_y'], [6, np.nan, 5, 8, 5, 8, 7]) + def test_merge_copy(self): + left = DataFrame({'a': 0, 'b': 1}, index=lrange(10)) + right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10)) + + merged = merge(left, right, left_index=True, + right_index=True, copy=True) + + merged['a'] = 6 + self.assert_((left['a'] == 0).all()) + + merged['d'] = 'peekaboo' + self.assert_((right['d'] == 'bar').all()) + def test_merge_nocopy(self): left = DataFrame({'a': 0, 'b': 1}, index=lrange(10)) right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10)) 
@@ -1765,11 +1778,14 @@ def test_panel_join_overlap(self): p1 = panel.ix[['ItemA', 'ItemB', 'ItemC']] p2 = panel.ix[['ItemB', 'ItemC']] + # Expected index is + # + # ItemA, ItemB_p1, ItemC_p1, ItemB_p2, ItemC_p2 joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2') p1_suf = p1.ix[['ItemB', 'ItemC']].add_suffix('_p1') p2_suf = p2.ix[['ItemB', 'ItemC']].add_suffix('_p2') no_overlap = panel.ix[['ItemA']] - expected = p1_suf.join(p2_suf).join(no_overlap) + expected = no_overlap.join(p1_suf.join(p2_suf)) tm.assert_panel_equal(joined, expected) def test_panel_join_many(self): diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index 23a6ae0982771..dd72a5245e7b2 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -337,7 +337,8 @@ def _take_new_index(obj, indexer, new_index, axis=0): elif isinstance(obj, DataFrame): if axis == 1: raise NotImplementedError - return DataFrame(obj._data.take(indexer, new_index=new_index, axis=1)) + return DataFrame(obj._data.reindex_indexer( + new_axis=new_index, indexer=indexer, axis=1)) else: raise NotImplementedError diff --git a/vb_suite/eval.py b/vb_suite/eval.py index 3b0efa9e88f48..36aa702b5602a 100644 --- a/vb_suite/eval.py +++ b/vb_suite/eval.py @@ -55,7 +55,7 @@ start_date=datetime(2013, 7, 26)) eval_frame_mult_python = \ - Benchmark("pdl.eval('df * df2 * df3 * df4', engine='python')", + Benchmark("pd.eval('df * df2 * df3 * df4', engine='python')", common_setup, name='eval_frame_mult_python', start_date=datetime(2013, 7, 21)) @@ -102,7 +102,7 @@ name='eval_frame_chained_cmp_one_thread', start_date=datetime(2013, 7, 26)) -setup = common_setup +# setup = common_setup eval_frame_chained_cmp_python = \ Benchmark("pd.eval('df < df2 < df3 < df4', engine='python')", common_setup, name='eval_frame_chained_cmp_python',
This is a WIP request for Stage 1 deliverable of #6744. Roadmap: - [x] make ref_locs primary source of information (leaving items/ref_items in place to back it up and avoid breakage) - [x] port groupby to loc-based implementation (there's quite a number of hacks a.t.m. that make this non-trivial) - [x] ditto for merge/join/concat - [x] ditto for io code - [x] ditto for reshape - [x] clean up Block `items` - [x] clean up Block `ref_items` - [x] fix performance issues & integrate with mainline
https://api.github.com/repos/pandas-dev/pandas/pulls/6745
2014-03-30T18:16:10Z
2014-04-25T14:22:31Z
2014-04-25T14:22:30Z
2020-05-01T19:33:53Z
API: all offset operations now return Timestamp types (rather than datetime) (GH4069)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 2646dd33535a1..c0d4c0c73296f 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -150,6 +150,8 @@ API Changes - ``DataFrame.sort`` now places NaNs at the beginning or end of the sort according to the ``na_position`` parameter. (:issue:`3917`) +- all offset operations now return ``Timestamp`` types (rather than datetime), Business/Week frequencies were incorrect (:issue:`4069`) + Deprecations ~~~~~~~~~~~~ diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index eb40f1f520cff..9130d0f3d8102 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -472,9 +472,9 @@ def _set_busdaycalendar(self): kwargs = {'weekmask':self.weekmask,'holidays':self.holidays} else: kwargs = {'weekmask':self.weekmask} - try: + try: self.busdaycalendar = np.busdaycalendar(**kwargs) - except: + except: # Check we have the required numpy version from distutils.version import LooseVersion @@ -484,9 +484,9 @@ def _set_busdaycalendar(self): np.__version__) else: raise - + def __getstate__(self): - """"Return a pickleable state""" + """Return a pickleable state""" state = self.__dict__.copy() del state['busdaycalendar'] return state @@ -520,7 +520,7 @@ def apply(self, other): if self.offset: result = result + self.offset - return result + return as_timestamp(result) elif isinstance(other, np.datetime64): dtype = other.dtype @@ -539,7 +539,7 @@ def apply(self, other): if self.offset: result = result + self.offset - return result + return as_timestamp(result) elif isinstance(other, (timedelta, Tick)): return BDay(self.n, offset=self.offset + other, @@ -639,7 +639,7 @@ def apply(self, other): if other.weekday() > 4: other = other - BDay() - return other + return as_timestamp(other) _prefix = 'BM' @@ -706,7 +706,7 @@ def isAnchored(self): def apply(self, other): if self.weekday is None: - return as_datetime(other) + self.n * self._inc + return as_timestamp(as_datetime(other) + self.n * 
self._inc) if self.n > 0: k = self.n @@ -998,7 +998,7 @@ def apply(self, other): if other.weekday() > 4: other = other - BDay() - return other + return as_timestamp(other) def onOffset(self, dt): modMonth = (dt.month - self.startingMonth) % 3 @@ -1188,7 +1188,7 @@ def apply(self, other): if result.weekday() > 4: result = result - BDay() - return result + return as_timestamp(result) class BYearBegin(YearOffset): diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index b303b7bb50526..07daf7f22afb7 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -99,7 +99,7 @@ class TestBase(tm.TestCase): def test_apply_out_of_range(self): if self._offset is None: - raise nose.SkipTest("_offset not defined") + raise nose.SkipTest("_offset not defined to test out-of-range") # try to create an out-of-bounds result timestamp; if we can't create the offset # skip @@ -113,6 +113,17 @@ def test_apply_out_of_range(self): except (ValueError, KeyError): raise nose.SkipTest("cannot create out_of_range offset") + def test_return_type(self): + + # make sure that we are returning a Timestamp + try: + offset = self._offset(1) + except: + raise nose.SkipTest("_offset not defined to test return_type") + + result = Timestamp('20080101') + offset + self.assertIsInstance(result, Timestamp) + class TestDateOffset(TestBase): _multiprocess_can_split_ = True
Business/Week frequencies were incorrect closes #4069
https://api.github.com/repos/pandas-dev/pandas/pulls/6743
2014-03-30T15:11:07Z
2014-03-30T15:23:19Z
2014-03-30T15:23:19Z
2014-06-27T23:41:02Z
DOC: small doc fixes (build warning in sym_diff + formatting in release notes)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 58232e226e277..f02b286365ea7 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -91,6 +91,7 @@ API Changes - ``microsecond,nanosecond,qyear`` - ``min(),max()`` - ``pd.infer_freq()`` + - ``pd.infer_freq()`` will now raise a ``TypeError`` if given an invalid ``Series/Index`` type (:issue:`6407`, :issue:`6463`) @@ -99,12 +100,12 @@ API Changes (:issue:`5987`). For the :class:`~pandas.DataFrame` methods, two things have changed - - Column names are now given precedence over locals - - Local variables must be referred to explicitly. This means that even if - you have a local variable that is *not* a column you must still refer to - it with the ``'@'`` prefix. - - You can have an expression like ``df.query('@a < a')`` with no complaints - from ``pandas`` about ambiguity of the name ``a``. + - Column names are now given precedence over locals + - Local variables must be referred to explicitly. This means that even if + you have a local variable that is *not* a column you must still refer to + it with the ``'@'`` prefix. + - You can have an expression like ``df.query('@a < a')`` with no complaints + from ``pandas`` about ambiguity of the name ``a``. - The top-level :func:`pandas.eval` function does not allow you use the ``'@'`` prefix and provides you with an error message telling you so. @@ -115,6 +116,7 @@ API Changes longer change type of the resulting index (:issue:`6440`). - ``set_index`` no longer converts MultiIndexes to an Index of tuples (:issue:`6459`). 
- Slicing with negative start, stop & step values handles corner cases better (:issue:`6531`): + - ``df.iloc[:-len(df)]`` is now empty - ``df.iloc[len(df)::-1]`` now enumerates all elements in reverse @@ -136,16 +138,15 @@ API Changes - Fix a bug where invalid eval/query operations would blow the stack (:issue:`5198`) -- Following keywords are now acceptable for :meth:`DataFrame.plot(kind='bar')` and :meth:`DataFrame.plot(kind='barh')`. +- Following keywords are now acceptable for :meth:`DataFrame.plot` with ``kind='bar'`` and ``kind='barh'``: - `width`: Specify the bar width. In previous versions, static value 0.5 was passed to matplotlib and it cannot be overwritten. (:issue:`6604`) - `align`: Specify the bar alignment. Default is `center` (different from matplotlib). In previous versions, pandas passes `align='edge'` to matplotlib and adjust the location to `center` by itself, and it results `align` keyword is not applied as expected. (:issue:`4525`) - - `position`: Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1(right/top-end). Default is 0.5 (center). (:issue:`6604`) + - `position`: Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center). (:issue:`6604`) -- Define and document the order of column vs index names in query/eval - (:issue:`6676`) +- Define and document the order of column vs index names in query/eval (:issue:`6676`) - ``DataFrame.sort`` now places NaNs at the beginning or end of the sort according to the ``na_position`` parameter. (:issue:`3917`) diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 344198d6e5ef1..428e7936ee126 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -22,23 +22,23 @@ API changes values. A single indexer / list of indexers that is out-of-bounds will still raise ``IndexError`` (:issue:`6296`, :issue:`6299`). This could result in an empty axis (e.g. an empty DataFrame being returned) -.. 
ipython:: python + .. ipython:: python - dfl = DataFrame(np.random.randn(5,2),columns=list('AB')) - dfl - dfl.iloc[:,2:3] - dfl.iloc[:,1:3] - dfl.iloc[4:6] + dfl = DataFrame(np.random.randn(5,2),columns=list('AB')) + dfl + dfl.iloc[:,2:3] + dfl.iloc[:,1:3] + dfl.iloc[4:6] -These are out-of-bounds selections + These are out-of-bounds selections -.. code-block:: python + .. code-block:: python - dfl.iloc[[4,5,6]] - IndexError: positional indexers are out-of-bounds + dfl.iloc[[4,5,6]] + IndexError: positional indexers are out-of-bounds - dfl.iloc[:,4] - IndexError: single positional indexer is out-of-bounds + dfl.iloc[:,4] + IndexError: single positional indexer is out-of-bounds - The ``DataFrame.interpolate()`` ``downcast`` keyword default has been changed from ``infer`` to @@ -181,7 +181,7 @@ These are out-of-bounds selections - `position`: Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1(right/top-end). Default is 0.5 (center). (:issue:`6604`) - Because of the default `align` value changes, coordinates of bar plots are now located on integer values (0.0, 1.0, 2.0 ...). This is intended to make bar plot be located on the same coodinates as line plot. However, bar plot may differs unexpectedly when you manually adjust the bar location or drawing area, such as using `set_xlim`, `set_ylim`, etc. In this cases, please modify your script to meet with new coordinates. + Because of the default `align` value changes, coordinates of bar plots are now located on integer values (0.0, 1.0, 2.0 ...). This is intended to make bar plot be located on the same coodinates as line plot. However, bar plot may differs unexpectedly when you manually adjust the bar location or drawing area, such as using `set_xlim`, `set_ylim`, etc. In this cases, please modify your script to meet with new coordinates. 
- ``pairwise`` keyword was added to the statistical moment functions ``rolling_cov``, ``rolling_corr``, ``ewmcov``, ``ewmcorr``, diff --git a/pandas/core/index.py b/pandas/core/index.py index 32c1672566da0..3213f288be4b3 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1059,7 +1059,7 @@ def diff(self, other): def sym_diff(self, other, result_name=None): """ - Compute the sorted symmetric_difference of two Index objects. + Compute the sorted symmetric difference of two Index objects. Parameters ---------- @@ -1077,7 +1077,7 @@ def sym_diff(self, other, result_name=None): ``idx2`` but not both. Equivalent to the Index created by ``(idx1 - idx2) + (idx2 - idx1)`` with duplicates dropped. - The sorting of a result containing ``NaN``s is not guaranteed + The sorting of a result containing ``NaN`` values is not guaranteed across Python versions. See GitHub issue #6444. Examples
https://api.github.com/repos/pandas-dev/pandas/pulls/6739
2014-03-29T17:21:01Z
2014-03-29T17:21:36Z
2014-03-29T17:21:36Z
2014-07-16T08:59:49Z
BUG: Bug in downcasting inference with empty arrays (GH6733)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 3d4ff0610f43f..58232e226e277 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -292,6 +292,7 @@ Bug Fixes - Bug in resample with extra bins when using an evenly divisible frequency (:issue:`4076`) - Bug in consistency of groupby aggregation when passing a custom function (:issue:`6715`) - Bug in resample when ``how=None`` resample freq is the same as the axis frequency (:issue:`5955`) +- Bug in downcasting inference with empty arrays (:issue:`6733`) pandas 0.13.1 ------------- diff --git a/pandas/core/common.py b/pandas/core/common.py index daeb43c7e76ac..b33ee6d66f901 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1084,7 +1084,7 @@ def _possibly_downcast_to_dtype(result, dtype): or could be an astype of float64->float32 """ - if np.isscalar(result) or not len(result): + if np.isscalar(result): return result trans = lambda x: x @@ -1114,15 +1114,19 @@ def _possibly_downcast_to_dtype(result, dtype): try: - # don't allow upcasts here + # don't allow upcasts here (except if empty) if dtype.kind == result.dtype.kind: - if result.dtype.itemsize <= dtype.itemsize: + if result.dtype.itemsize <= dtype.itemsize and np.prod(result.shape): return result if issubclass(dtype.type, np.floating): return result.astype(dtype) elif dtype == np.bool_ or issubclass(dtype.type, np.integer): + # if we don't have any elements, just astype it + if not np.prod(result.shape): + return trans(result).astype(dtype) + # do a test on the first element, if it fails then we are done r = result.ravel() arr = np.array([r[0]]) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 59bfce8d9d636..7185b684a1e12 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -166,6 +166,12 @@ def test_downcast_conv(): result = com._possibly_downcast_to_dtype(arr,'infer') tm.assert_almost_equal(result, expected) + # empties + for dtype in 
[np.int32,np.float64,np.float32,np.bool_,np.int64,object]: + arr = np.array([],dtype=dtype) + result = com._possibly_downcast_to_dtype(arr,'int64') + tm.assert_almost_equal(result, np.array([],dtype=np.int64)) + assert result.dtype == np.int64 def test_array_equivalent(): assert array_equivalent(np.array([np.nan, np.nan]), @@ -182,10 +188,10 @@ def test_array_equivalent(): np.array([np.nan, 2, np.nan])) assert not array_equivalent(np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e'])) assert array_equivalent(Float64Index([0, np.nan]), Float64Index([0, np.nan])) - assert not array_equivalent(Float64Index([0, np.nan]), Float64Index([1, np.nan])) - assert array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan])) + assert not array_equivalent(Float64Index([0, np.nan]), Float64Index([1, np.nan])) + assert array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan])) assert not array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan])) - + def test_datetimeindex_from_empty_datetime64_array(): for unit in [ 'ms', 'us', 'ns' ]: idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit)) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 79eac770f547e..3ff40b1adf156 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2237,6 +2237,14 @@ def test_groupby_aggregation_mixed_dtype(self): result = g[['v1','v2']].mean() assert_frame_equal(result,expected) + + def test_groupby_dtype_inference_empty(self): + # GH 6733 + df = DataFrame({'x': [], 'range': np.arange(0)}) + result = df.groupby('x').first() + expected = DataFrame({'range' : Series([],index=Index([],name='x'),dtype='int64') }) + assert_frame_equal(result,expected,by_blocks=True) + def test_groupby_list_infer_array_like(self): result = self.df.groupby(list(self.df['A'])).mean() expected = self.df.groupby(self.df['A']).mean() @@ -3862,20 +3870,20 @@ def test_lexsort_indexer(self): result = _lexsort_indexer(keys, orders=True, 
na_position='last') expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110)) assert_equal(result, expected) - + # orders=True, na_position='first' result = _lexsort_indexer(keys, orders=True, na_position='first') expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105)) assert_equal(result, expected) - + # orders=False, na_position='last' result = _lexsort_indexer(keys, orders=False, na_position='last') - expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)) + expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)) assert_equal(result, expected) - + # orders=False, na_position='first' result = _lexsort_indexer(keys, orders=False, na_position='first') - expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) + expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) assert_equal(result, expected) def test_nargsort(self): @@ -3887,7 +3895,7 @@ def test_nargsort(self): try: # GH 2785; due to a regression in NumPy1.6.2 np.argsort(np.array([[1, 2], [1, 3], [1, 2]], dtype='i')) - np.argsort(items2, kind='mergesort') + np.argsort(items2, kind='mergesort') except TypeError as err: raise nose.SkipTest('requested sort not available for type') @@ -3898,7 +3906,7 @@ def test_nargsort(self): # because quick and merge sort fall over to insertion sort for small # arrays.""" - + # mergesort, ascending=True, na_position='last' result = _nargsort( items, kind='mergesort', ascending=True, na_position='last')
closes #6733
https://api.github.com/repos/pandas-dev/pandas/pulls/6737
2014-03-29T14:28:55Z
2014-03-29T14:46:24Z
2014-03-29T14:46:24Z
2014-07-16T08:59:47Z
ENH: drop function now has errors keyword for non-existing column handling
diff --git a/doc/source/whatsnew/v0.15.2.txt b/doc/source/whatsnew/v0.15.2.txt index 02de919e3f83e..6a14a4024ba5a 100644 --- a/doc/source/whatsnew/v0.15.2.txt +++ b/doc/source/whatsnew/v0.15.2.txt @@ -49,7 +49,7 @@ API changes In [3]: cat = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c']) In [4]: cat - Out[4]: + Out[4]: [a, b, a] Categories (3, object): [a < b < c] diff --git a/doc/source/whatsnew/v0.16.1.txt b/doc/source/whatsnew/v0.16.1.txt index 5e75d9ed011a2..df21e51d100fc 100644 --- a/doc/source/whatsnew/v0.16.1.txt +++ b/doc/source/whatsnew/v0.16.1.txt @@ -23,6 +23,13 @@ Enhancements +- ``drop`` function can now accept ``errors`` keyword to suppress ValueError raised when any of label does not exist in the target data. (:issue:`6736`) + + .. ipython:: python + + df = DataFrame(np.random.randn(3, 3), columns=['A', 'B', 'C']) + df.drop(['A', 'X'], axis=1, errors='ignore') + .. _whatsnew_0161.api: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 012a73fac1ef4..30a3601a5a4bc 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1557,7 +1557,7 @@ def reindex_like(self, other, method=None, copy=True, limit=None): return self.reindex(**d) - def drop(self, labels, axis=0, level=None, inplace=False): + def drop(self, labels, axis=0, level=None, inplace=False, errors='raise'): """ Return new object with labels in requested axis removed @@ -1569,6 +1569,8 @@ def drop(self, labels, axis=0, level=None, inplace=False): For MultiIndex inplace : bool, default False If True, do operation inplace and return None. + errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and existing labels are dropped. 
Returns ------- @@ -1582,9 +1584,9 @@ def drop(self, labels, axis=0, level=None, inplace=False): if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') - new_axis = axis.drop(labels, level=level) + new_axis = axis.drop(labels, level=level, errors=errors) else: - new_axis = axis.drop(labels) + new_axis = axis.drop(labels, errors=errors) dropped = self.reindex(**{axis_name: new_axis}) try: dropped.axes[axis_].set_names(axis.names, inplace=True) diff --git a/pandas/core/index.py b/pandas/core/index.py index e335d00551bab..fd11cd7f598c3 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2325,13 +2325,15 @@ def insert(self, loc, item): (_self[:loc], item_idx, _self[loc:])) return Index(idx, name=self.name) - def drop(self, labels): + def drop(self, labels, errors='raise'): """ Make new Index with passed list of labels deleted Parameters ---------- labels : array-like + errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and existing labels are dropped. 
Returns ------- @@ -2341,7 +2343,9 @@ def drop(self, labels): indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): - raise ValueError('labels %s not contained in axis' % labels[mask]) + if errors != 'ignore': + raise ValueError('labels %s not contained in axis' % labels[mask]) + indexer = indexer[~mask] return self.delete(indexer) @Appender(_shared_docs['drop_duplicates'] % _index_doc_kwargs) @@ -3847,7 +3851,7 @@ def repeat(self, n): sortorder=self.sortorder, verify_integrity=False) - def drop(self, labels, level=None): + def drop(self, labels, level=None, errors='raise'): """ Make new MultiIndex with passed list of labels deleted @@ -3870,19 +3874,24 @@ def drop(self, labels, level=None): indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): - raise ValueError('labels %s not contained in axis' - % labels[mask]) - return self.delete(indexer) + if errors != 'ignore': + raise ValueError('labels %s not contained in axis' + % labels[mask]) + indexer = indexer[~mask] except Exception: pass inds = [] for label in labels: - loc = self.get_loc(label) - if isinstance(loc, int): - inds.append(loc) - else: - inds.extend(lrange(loc.start, loc.stop)) + try: + loc = self.get_loc(label) + if isinstance(loc, int): + inds.append(loc) + else: + inds.extend(lrange(loc.start, loc.stop)) + except KeyError: + if errors != 'ignore': + raise return self.delete(inds) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index cdda087b27613..04335991bd614 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -7409,6 +7409,26 @@ def test_drop_names(self): self.assertEqual(obj.columns.name, 'second') self.assertEqual(list(df.columns), ['d', 'e', 'f']) + self.assertRaises(ValueError, df.drop, ['g']) + self.assertRaises(ValueError, df.drop, ['g'], 1) + + # errors = 'ignore' + dropped = df.drop(['g'], errors='ignore') + expected = Index(['a', 'b', 'c']) + self.assert_index_equal(dropped.index, expected) + + dropped = 
df.drop(['b', 'g'], errors='ignore') + expected = Index(['a', 'c']) + self.assert_index_equal(dropped.index, expected) + + dropped = df.drop(['g'], axis=1, errors='ignore') + expected = Index(['d', 'e', 'f']) + self.assert_index_equal(dropped.columns, expected) + + dropped = df.drop(['d', 'g'], axis=1, errors='ignore') + expected = Index(['e', 'f']) + self.assert_index_equal(dropped.columns, expected) + def test_dropEmptyRows(self): N = len(self.frame.index) mat = randn(N) @@ -7787,6 +7807,19 @@ def test_drop(self): assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :]) assert_frame_equal(simple.drop([0, 3], axis='index'), simple.ix[[1, 2], :]) + self.assertRaises(ValueError, simple.drop, 5) + self.assertRaises(ValueError, simple.drop, 'C', 1) + self.assertRaises(ValueError, simple.drop, [1, 5]) + self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1) + + # errors = 'ignore' + assert_frame_equal(simple.drop(5, errors='ignore'), simple) + assert_frame_equal(simple.drop([0, 5], errors='ignore'), + simple.ix[[1, 2, 3], :]) + assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple) + assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'), + simple[['B']]) + #non-unique - wheee! 
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')), columns=['a', 'a', 'b']) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 39db387045f12..61cb337880c00 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1036,20 +1036,43 @@ def check_slice(in_slice, expected): def test_drop(self): n = len(self.strIndex) - dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)]) + drop = self.strIndex[lrange(5, 10)] + dropped = self.strIndex.drop(drop) expected = self.strIndex[lrange(5) + lrange(10, n)] self.assertTrue(dropped.equals(expected)) self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar']) + self.assertRaises(ValueError, self.strIndex.drop, ['1', 'bar']) + + # errors='ignore' + mixed = drop.tolist() + ['foo'] + dropped = self.strIndex.drop(mixed, errors='ignore') + expected = self.strIndex[lrange(5) + lrange(10, n)] + self.assert_index_equal(dropped, expected) + + dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore') + expected = self.strIndex[lrange(n)] + self.assert_index_equal(dropped, expected) dropped = self.strIndex.drop(self.strIndex[0]) expected = self.strIndex[1:] - self.assertTrue(dropped.equals(expected)) + self.assert_index_equal(dropped, expected) ser = Index([1, 2, 3]) dropped = ser.drop(1) expected = Index([2, 3]) - self.assertTrue(dropped.equals(expected)) + self.assert_index_equal(dropped, expected) + + # errors='ignore' + self.assertRaises(ValueError, ser.drop, [3, 4]) + + dropped = ser.drop(4, errors='ignore') + expected = Index([1, 2, 3]) + self.assert_index_equal(dropped, expected) + + dropped = ser.drop([3, 4, 5], errors='ignore') + expected = Index([1, 2]) + self.assert_index_equal(dropped, expected) def test_tuple_union_bug(self): import pandas @@ -3529,21 +3552,50 @@ def test_drop(self): dropped2 = self.index.drop(index) expected = self.index[[0, 2, 3, 5]] - self.assertTrue(dropped.equals(expected)) - self.assertTrue(dropped2.equals(expected)) + 
self.assert_index_equal(dropped, expected) + self.assert_index_equal(dropped2, expected) dropped = self.index.drop(['bar']) expected = self.index[[0, 1, 3, 4, 5]] - self.assertTrue(dropped.equals(expected)) + self.assert_index_equal(dropped, expected) + + dropped = self.index.drop('foo') + expected = self.index[[2, 3, 4, 5]] + self.assert_index_equal(dropped, expected) index = MultiIndex.from_tuples([('bar', 'two')]) self.assertRaises(KeyError, self.index.drop, [('bar', 'two')]) self.assertRaises(KeyError, self.index.drop, index) + self.assertRaises(KeyError, self.index.drop, ['foo', 'two']) + + # partially correct argument + mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')]) + self.assertRaises(KeyError, self.index.drop, mixed_index) + + # error='ignore' + dropped = self.index.drop(index, errors='ignore') + expected = self.index[[0, 1, 2, 3, 4, 5]] + self.assert_index_equal(dropped, expected) + + dropped = self.index.drop(mixed_index, errors='ignore') + expected = self.index[[0, 1, 2, 3, 5]] + self.assert_index_equal(dropped, expected) + + dropped = self.index.drop(['foo', 'two'], errors='ignore') + expected = self.index[[2, 3, 4, 5]] + self.assert_index_equal(dropped, expected) # mixed partial / full drop dropped = self.index.drop(['foo', ('qux', 'one')]) expected = self.index[[2, 3, 5]] - self.assertTrue(dropped.equals(expected)) + self.assert_index_equal(dropped, expected) + + # mixed partial / full drop / error='ignore' + mixed_index = ['foo', ('qux', 'one'), 'two'] + self.assertRaises(KeyError, self.index.drop, mixed_index) + dropped = self.index.drop(mixed_index, errors='ignore') + expected = self.index[[2, 3, 5]] + self.assert_index_equal(dropped, expected) def test_droplevel_with_names(self): index = self.index[self.index.get_loc('foo')] diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index cab668b3118fd..0fd03cb5804a8 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1984,6 +1984,15 @@ 
def check_drop(drop_val, axis_number, aliases, expected): expected = Panel({"One": df}) check_drop('Two', 0, ['items'], expected) + self.assertRaises(ValueError, panel.drop, 'Three') + + # errors = 'ignore' + dropped = panel.drop('Three', errors='ignore') + assert_panel_equal(dropped, panel) + dropped = panel.drop(['Two', 'Three'], errors='ignore') + expected = Panel({"One": df}) + assert_panel_equal(dropped, expected) + # Major exp_df = DataFrame({"A": [2], "B": [4]}, index=[1]) expected = Panel({"One": exp_df, "Two": exp_df}) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index c021bb1bf2fd6..f044fe540ea24 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1954,6 +1954,14 @@ def test_drop(self): self.assertRaises(ValueError, s.drop, 'bc') self.assertRaises(ValueError, s.drop, ('a',)) + # errors='ignore' + s = Series(range(3),index=list('abc')) + result = s.drop('bc', errors='ignore') + assert_series_equal(result, s) + result = s.drop(['a', 'd'], errors='ignore') + expected = s.ix[1:] + assert_series_equal(result, expected) + # bad axis self.assertRaises(ValueError, s.drop, 'one', axis='columns')
Closes #5300. Currently `drop` raises `ValueError` when non-existing label is passed. I think it is useful if `drop` has an option to suppress error and drop existing labels only. For example, I sometimes process lots of files which has slightly different columns, and want to `drop` if data has unnecessary columns. Previously, I have to prepare different `drop` arguments checking columns existing each data.
https://api.github.com/repos/pandas-dev/pandas/pulls/6736
2014-03-29T12:21:38Z
2015-04-08T14:35:57Z
2015-04-08T14:35:57Z
2015-04-11T13:09:29Z
ENH: SQL multiindex support
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index efb8ce07ab60e..a80e8049ae627 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -310,8 +310,8 @@ def read_table(table_name, con, meta=None, index_col=None, coerce_float=True, Legacy mode not supported meta : SQLAlchemy meta, optional If omitted MetaData is reflected from engine - index_col : string, optional - Column to set as index + index_col : string or sequence of strings, optional + Column(s) to set as index. coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point. Can result in loss of Precision. @@ -324,7 +324,7 @@ def read_table(table_name, con, meta=None, index_col=None, coerce_float=True, to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite - columns : list + columns : list, optional List of column names to select from sql table Returns @@ -340,7 +340,8 @@ def read_table(table_name, con, meta=None, index_col=None, coerce_float=True, table = pandas_sql.read_table(table_name, index_col=index_col, coerce_float=coerce_float, - parse_dates=parse_dates) + parse_dates=parse_dates, + columns=columns) if table is not None: return table @@ -438,19 +439,25 @@ def maybe_asscalar(self, i): def insert(self): ins = self.insert_statement() data_list = [] - # to avoid if check for every row - keys = self.frame.columns + if self.index is not None: - for t in self.frame.itertuples(): - data = dict((k, self.maybe_asscalar(v)) - for k, v in zip(keys, t[1:])) - data[self.index] = self.maybe_asscalar(t[0]) - data_list.append(data) + temp = self.frame.copy() + temp.index.names = self.index + try: + temp.reset_index(inplace=True) + except ValueError as err: + raise ValueError( + "duplicate name in index/columns: {0}".format(err)) else: - for t in self.frame.itertuples(): - data = dict((k, self.maybe_asscalar(v)) - for k, v in zip(keys, t[1:])) - 
data_list.append(data) + temp = self.frame + + keys = temp.columns + + for t in temp.itertuples(): + data = dict((k, self.maybe_asscalar(v)) + for k, v in zip(keys, t[1:])) + data_list.append(data) + self.pd_sql.execute(ins, data_list) def read(self, coerce_float=True, parse_dates=None, columns=None): @@ -459,7 +466,7 @@ def read(self, coerce_float=True, parse_dates=None, columns=None): from sqlalchemy import select cols = [self.table.c[n] for n in columns] if self.index is not None: - cols.insert(0, self.table.c[self.index]) + [cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]] sql_select = select(cols) else: sql_select = self.table.select() @@ -476,22 +483,33 @@ def read(self, coerce_float=True, parse_dates=None, columns=None): if self.index is not None: self.frame.set_index(self.index, inplace=True) - # Assume if the index in prefix_index format, we gave it a name - # and should return it nameless - if self.index == self.prefix + '_index': - self.frame.index.name = None - return self.frame def _index_name(self, index, index_label): + # for writing: index=True to include index in sql table if index is True: + nlevels = self.frame.index.nlevels + # if index_label is specified, set this as index name(s) if index_label is not None: - return _safe_col_name(index_label) - elif self.frame.index.name is not None: - return _safe_col_name(self.frame.index.name) + if not isinstance(index_label, list): + index_label = [index_label] + if len(index_label) != nlevels: + raise ValueError( + "Length of 'index_label' should match number of " + "levels, which is {0}".format(nlevels)) + else: + return index_label + # return the used column labels for the index columns + if nlevels == 1 and 'index' not in self.frame.columns and self.frame.index.name is None: + return ['index'] else: - return self.prefix + '_index' + return [l if l is not None else "level_{0}".format(i) + for i, l in enumerate(self.frame.index.names)] + + # for reading: index=(list of) string to specify 
column to set as index elif isinstance(index, string_types): + return [index] + elif isinstance(index, list): return index else: return None @@ -506,10 +524,10 @@ def _create_table_statement(self): for name, typ in zip(safe_columns, column_types)] if self.index is not None: - columns.insert(0, Column(self.index, - self._sqlalchemy_type( - self.frame.index), - index=True)) + for i, idx_label in enumerate(self.index[::-1]): + idx_type = self._sqlalchemy_type( + self.frame.index.get_level_values(i)) + columns.insert(0, Column(idx_label, idx_type, index=True)) return Table(self.name, self.pd_sql.meta, *columns) @@ -787,6 +805,17 @@ def insert(self): cur.close() self.pd_sql.con.commit() + def _index_name(self, index, index_label): + if index is True: + if self.frame.index.name is not None: + return _safe_col_name(self.frame.index.name) + else: + return 'pandas_index' + elif isinstance(index, string_types): + return index + else: + return None + def _create_table_statement(self): "Return a CREATE TABLE statement to suit the contents of a DataFrame." diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 57918e8315102..aa1b2516e4fb6 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -7,7 +7,7 @@ import nose import numpy as np -from pandas import DataFrame, Series +from pandas import DataFrame, Series, MultiIndex from pandas.compat import range, lrange, iteritems #from pandas.core.datetools import format as date_format @@ -266,7 +266,7 @@ def _roundtrip(self): self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip') result = self.pandasSQL.read_sql('SELECT * FROM test_frame_roundtrip') - result.set_index('pandas_index', inplace=True) + result.set_index('level_0', inplace=True) # result.index.astype(int) result.index.name = None @@ -391,7 +391,7 @@ def test_roundtrip(self): # HACK! 
result.index = self.test_frame1.index - result.set_index('pandas_index', inplace=True) + result.set_index('level_0', inplace=True) result.index.astype(int) result.index.name = None tm.assert_frame_equal(result, self.test_frame1) @@ -460,7 +460,9 @@ def test_date_and_index(self): issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") + class TestSQLApi(_TestSQLApi): + """Test the public API as it would be used directly """ flavor = 'sqlite' @@ -474,10 +476,10 @@ def connect(self): def test_to_sql_index_label(self): temp_frame = DataFrame({'col1': range(4)}) - # no index name, defaults to 'pandas_index' + # no index name, defaults to 'index' sql.to_sql(temp_frame, 'test_index_label', self.conn) frame = sql.read_table('test_index_label', self.conn) - self.assertEqual(frame.columns[0], 'pandas_index') + self.assertEqual(frame.columns[0], 'index') # specifying index_label sql.to_sql(temp_frame, 'test_index_label', self.conn, @@ -487,11 +489,11 @@ def test_to_sql_index_label(self): "Specified index_label not written to database") # using the index name - temp_frame.index.name = 'index' + temp_frame.index.name = 'index_name' sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace') frame = sql.read_table('test_index_label', self.conn) - self.assertEqual(frame.columns[0], 'index', + self.assertEqual(frame.columns[0], 'index_name', "Index name not written to database") # has index name, but specifying index_label @@ -501,8 +503,74 @@ def test_to_sql_index_label(self): self.assertEqual(frame.columns[0], 'other_label', "Specified index_label not written to database") + def test_to_sql_index_label_multiindex(self): + temp_frame = DataFrame({'col1': range(4)}, + index=MultiIndex.from_product([('A0', 'A1'), ('B0', 'B1')])) + + # no index name, defaults to 'level_0' and 'level_1' + sql.to_sql(temp_frame, 'test_index_label', self.conn) + frame = sql.read_table('test_index_label', self.conn) + 
self.assertEqual(frame.columns[0], 'level_0') + self.assertEqual(frame.columns[1], 'level_1') + + # specifying index_label + sql.to_sql(temp_frame, 'test_index_label', self.conn, + if_exists='replace', index_label=['A', 'B']) + frame = sql.read_table('test_index_label', self.conn) + self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'], + "Specified index_labels not written to database") + + # using the index name + temp_frame.index.names = ['A', 'B'] + sql.to_sql(temp_frame, 'test_index_label', self.conn, + if_exists='replace') + frame = sql.read_table('test_index_label', self.conn) + self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'], + "Index names not written to database") + + # has index name, but specifying index_label + sql.to_sql(temp_frame, 'test_index_label', self.conn, + if_exists='replace', index_label=['C', 'D']) + frame = sql.read_table('test_index_label', self.conn) + self.assertEqual(frame.columns[:2].tolist(), ['C', 'D'], + "Specified index_labels not written to database") + + # wrong length of index_label + self.assertRaises(ValueError, sql.to_sql, temp_frame, + 'test_index_label', self.conn, if_exists='replace', + index_label='C') + + def test_read_table_columns(self): + # test columns argument in read_table + sql.to_sql(self.test_frame1, 'test_frame', self.conn) + + cols = ['A', 'B'] + result = sql.read_table('test_frame', self.conn, columns=cols) + self.assertEqual(result.columns.tolist(), cols, + "Columns not correctly selected") + + def test_read_table_index_col(self): + # test columns argument in read_table + sql.to_sql(self.test_frame1, 'test_frame', self.conn) + + result = sql.read_table('test_frame', self.conn, index_col="index") + self.assertEqual(result.index.names, ["index"], + "index_col not correctly set") + + result = sql.read_table('test_frame', self.conn, index_col=["A", "B"]) + self.assertEqual(result.index.names, ["A", "B"], + "index_col not correctly set") + + result = sql.read_table('test_frame', self.conn, 
index_col=["A", "B"], + columns=["C", "D"]) + self.assertEqual(result.index.names, ["A", "B"], + "index_col not correctly set") + self.assertEqual(result.columns.tolist(), ["C", "D"], + "columns not set correctly whith index_col") + class TestSQLLegacyApi(_TestSQLApi): + """Test the public legacy API """ flavor = 'sqlite' @@ -554,6 +622,23 @@ def test_sql_open_close(self): tm.assert_frame_equal(self.test_frame2, result) + def test_roundtrip(self): + # this test otherwise fails, Legacy mode still uses 'pandas_index' + # as default index column label + sql.to_sql(self.test_frame1, 'test_frame_roundtrip', + con=self.conn, flavor='sqlite') + result = sql.read_sql( + 'SELECT * FROM test_frame_roundtrip', + con=self.conn, + flavor='sqlite') + + # HACK! + result.index = self.test_frame1.index + result.set_index('pandas_index', inplace=True) + result.index.astype(int) + result.index.name = None + tm.assert_frame_equal(result, self.test_frame1) + class _TestSQLAlchemy(PandasSQLTest): """ @@ -776,6 +861,16 @@ def setUp(self): self._load_test1_data() + def _roundtrip(self): + # overwrite parent function (level_0 -> pandas_index in legacy mode) + self.drop_table('test_frame_roundtrip') + self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip') + result = self.pandasSQL.read_sql('SELECT * FROM test_frame_roundtrip') + result.set_index('pandas_index', inplace=True) + result.index.name = None + + tm.assert_frame_equal(result, self.test_frame1) + def test_invalid_flavor(self): self.assertRaises( NotImplementedError, sql.PandasSQLLegacy, self.conn, 'oracle')
Further work on #6292. This adds: - fixes the `columns` argument (wasn't passed) + added test for that - adds multi-index support in `to_sql`, by just using `reset_index` on the frame (+ tests) - adds multi-index support in read_table (+ tests)
https://api.github.com/repos/pandas-dev/pandas/pulls/6735
2014-03-29T11:28:29Z
2014-04-14T12:00:02Z
2014-04-14T12:00:02Z
2014-06-20T16:59:37Z
ENH: added nunique function to Index
diff --git a/doc/source/api.rst b/doc/source/api.rst index 1c80712e82d49..f6dfd5cfaf0e7 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -348,7 +348,6 @@ Computations / Descriptive Stats Series.median Series.min Series.mode - Series.nunique Series.pct_change Series.prod Series.quantile @@ -356,8 +355,9 @@ Computations / Descriptive Stats Series.skew Series.std Series.sum - Series.unique Series.var + Series.unique + Series.nunique Series.value_counts Reindexing / Selection / Label manipulation @@ -1053,6 +1053,8 @@ Modifying and Computations Index.repeat Index.set_names Index.unique + Index.nunique + Index.value_counts Conversion ~~~~~~~~~~ diff --git a/doc/source/release.rst b/doc/source/release.rst index 7188851214f7f..3f3cfe5dd4359 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -159,6 +159,8 @@ API Changes - Arithmetic ops are now disallowed when passed two bool dtype Series or DataFrames (:issue:`6762`). +- Added ``nunique`` and ``value_counts`` functions to ``Index`` for counting unique elements. (:issue:`6734`) + Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 23ab8f10116c1..58eec9fa0f528 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -199,6 +199,7 @@ API changes - ``Series.iteritems()`` is now lazy (returns an iterator rather than a list). This was the documented behavior prior to 0.14. (:issue:`6760`) - ``Panel.shift`` now uses ``NDFrame.shift``. It no longer drops the ``nan`` data and retains its original shape. (:issue:`4867`) +- Added ``nunique`` and ``value_counts`` functions to ``Index`` for counting unique elements. 
(:issue:`6734`) MultiIndexing Using Slicers ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/core/base.py b/pandas/core/base.py index f9bf4ca4ce91d..ec6a4ffbcefbb 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -269,6 +269,56 @@ def min(self): self._is_allowed_index_op('min') return self.values.min() + def value_counts(self, normalize=False, sort=True, ascending=False, + bins=None): + """ + Returns object containing counts of unique values. The resulting object + will be in descending order so that the first element is the most + frequently-occurring element. Excludes NA values. + + Parameters + ---------- + normalize : boolean, default False + If True then the object returned will contain the relative + frequencies of the unique values. + sort : boolean, default True + Sort by values + ascending : boolean, default False + Sort in ascending order + bins : integer, optional + Rather than count values, group them into half-open bins, + a convenience for pd.cut, only works with numeric data + + Returns + ------- + counts : Series + """ + from pandas.core.algorithms import value_counts + return value_counts(self.values, sort=sort, ascending=ascending, + normalize=normalize, bins=bins) + + def unique(self): + """ + Return array of unique values in the object. Significantly faster than + numpy.unique. Includes NA values. + + Returns + ------- + uniques : ndarray + """ + from pandas.core.nanops import unique1d + return unique1d(self.values) + + def nunique(self): + """ + Return count of unique elements in the object. Excludes NA values. + + Returns + ------- + nunique : int + """ + return len(self.value_counts()) + date = _field_accessor('date','Returns numpy array of datetime.date. The date part of the Timestamps') time = _field_accessor('time','Returns numpy array of datetime.time. 
The time part of the Timestamps') year = _field_accessor('year', "The year of the datetime") diff --git a/pandas/core/index.py b/pandas/core/index.py index bae4a2c455ec6..b2b0764b81d43 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1102,18 +1102,6 @@ def sym_diff(self, other, result_name=None): the_diff = sorted(set((self - other) + (other - self))) return Index(the_diff, name=result_name) - def unique(self): - """ - Return array of unique values in the Index. Significantly faster than - numpy.unique - - Returns - ------- - uniques : ndarray - """ - from pandas.core.nanops import unique1d - return unique1d(self.values) - def get_loc(self, key): """ Get integer location for requested label diff --git a/pandas/core/series.py b/pandas/core/series.py index 4ab7855ec2f84..544d327c9a13d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1095,34 +1095,6 @@ def count(self, level=None): return notnull(_values_from_object(self)).sum() - def value_counts(self, normalize=False, sort=True, ascending=False, - bins=None): - """ - Returns Series containing counts of unique values. The resulting Series - will be in descending order so that the first element is the most - frequently-occurring element. Excludes NA values - - Parameters - ---------- - normalize : boolean, default False - If True then the Series returned will contain the relative - frequencies of the unique values. - sort : boolean, default True - Sort by values - ascending : boolean, default False - Sort in ascending order - bins : integer, optional - Rather than count values, group them into half-open bins, - a convenience for pd.cut, only works with numeric data - - Returns - ------- - counts : Series - """ - from pandas.core.algorithms import value_counts - return value_counts(self.values, sort=sort, ascending=ascending, - normalize=normalize, bins=bins) - def mode(self): """Returns the mode(s) of the dataset. 
@@ -1143,27 +1115,6 @@ def mode(self): from pandas.core.algorithms import mode return mode(self) - def unique(self): - """ - Return array of unique values in the Series. Significantly faster than - numpy.unique - - Returns - ------- - uniques : ndarray - """ - return nanops.unique1d(self.values) - - def nunique(self): - """ - Return count of unique elements in the Series - - Returns - ------- - nunique : int - """ - return len(self.value_counts()) - def drop_duplicates(self, take_last=False, inplace=False): """ Return Series with duplicate values removed diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 32416dc975e64..6f7d22e6c50fe 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -1,11 +1,12 @@ import re +from datetime import timedelta import numpy as np import pandas.compat as compat import pandas as pd -from pandas.compat import u +from pandas.compat import u, StringIO from pandas.core.base import FrozenList, FrozenNDArray from pandas.util.testing import assertRaisesRegexp, assert_isinstance -from pandas import Series, Index, DatetimeIndex, PeriodIndex +from pandas import Series, Index, Int64Index, DatetimeIndex, PeriodIndex from pandas import _np_version_under1p7 import nose @@ -130,6 +131,7 @@ def setUp(self): self.int_index = tm.makeIntIndex(10) self.float_index = tm.makeFloatIndex(10) self.dt_index = tm.makeDateIndex(10) + self.dt_tz_index = tm.makeDateIndex(10).tz_localize(tz='US/Eastern') self.period_index = tm.makePeriodIndex(10) self.string_index = tm.makeStringIndex(10) @@ -137,10 +139,12 @@ def setUp(self): self.int_series = Series(arr, index=self.int_index) self.float_series = Series(arr, index=self.int_index) self.dt_series = Series(arr, index=self.dt_index) + self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True) self.period_series = Series(arr, index=self.period_index) self.string_series = Series(arr, index=self.string_index) - self.objs = [ getattr(self,"{0}_{1}".format(t,f)) for t in 
['int','float','dt','period','string'] for f in ['index','series'] ] + types = ['int','float','dt', 'dt_tz', 'period','string'] + self.objs = [ getattr(self,"{0}_{1}".format(t,f)) for t in types for f in ['index','series'] ] def check_ops_properties(self, props, filter=None, ignore_failures=False): for op in props: @@ -193,7 +197,207 @@ def test_ops(self): for o in self.objs: result = getattr(o,op)() expected = getattr(o.values,op)() - self.assertEqual(result, expected) + try: + self.assertEqual(result, expected) + except ValueError: + # comparing tz-aware series with np.array results in ValueError + expected = expected.astype('M8[ns]').astype('int64') + self.assertEqual(result.value, expected) + + def test_value_counts_unique_nunique(self): + for o in self.objs: + klass = type(o) + values = o.values + + # create repeated values, 'n'th element is repeated by n+1 times + if isinstance(o, PeriodIndex): + # freq must be specified because repeat makes freq ambiguous + o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq) + else: + o = klass(np.repeat(values, range(1, len(o) + 1))) + + expected_s = Series(range(10, 0, -1), index=values[::-1]) + tm.assert_series_equal(o.value_counts(), expected_s) + + if isinstance(o, DatetimeIndex): + # DatetimeIndex.unique returns DatetimeIndex + self.assert_(o.unique().equals(klass(values))) + else: + self.assert_numpy_array_equal(o.unique(), values) + + self.assertEqual(o.nunique(), len(np.unique(o.values))) + + for null_obj in [np.nan, None]: + for o in self.objs: + klass = type(o) + values = o.values + + if o.values.dtype == 'int64': + # skips int64 because it doesn't allow to include nan or None + continue + + if o.values.dtype == 'datetime64[ns]' and _np_version_under1p7: + # Unable to assign None + continue + + values[0:2] = null_obj + + # create repeated values, 'n'th element is repeated by n+1 times + if isinstance(o, PeriodIndex): + o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq) + else: + o = 
klass(np.repeat(values, range(1, len(o) + 1))) + + if isinstance(o, DatetimeIndex): + # DatetimeIndex: nan is casted to Nat and included + expected_s = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1]) + else: + # nan is excluded + expected_s = Series(range(10, 2, -1), index=values[9:1:-1]) + + tm.assert_series_equal(o.value_counts(), expected_s) + + # numpy_array_equal cannot compare arrays includes nan + result = o.unique() + self.assert_numpy_array_equal(result[1:], values[2:]) + + if isinstance(o, DatetimeIndex): + self.assert_(result[0] is pd.NaT) + else: + self.assert_(pd.isnull(result[0])) + + if isinstance(o, DatetimeIndex): + self.assertEqual(o.nunique(), 9) + else: + self.assertEqual(o.nunique(), 8) + + def test_value_counts_inferred(self): + klasses = [Index, Series] + for klass in klasses: + s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a'] + s = klass(s_values) + expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c']) + tm.assert_series_equal(s.value_counts(), expected) + + self.assert_numpy_array_equal(s.unique(), np.unique(s_values)) + self.assertEquals(s.nunique(), 4) + # don't sort, have to sort after the fact as not sorting is platform-dep + hist = s.value_counts(sort=False) + hist.sort() + expected = Series([3, 1, 4, 2], index=list('acbd')) + expected.sort() + tm.assert_series_equal(hist, expected) + + # sort ascending + hist = s.value_counts(ascending=True) + expected = Series([1, 2, 3, 4], index=list('cdab')) + tm.assert_series_equal(hist, expected) + + # relative histogram. 
+ hist = s.value_counts(normalize=True) + expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c']) + tm.assert_series_equal(hist, expected) + + # bins + self.assertRaises(TypeError, lambda bins: s.value_counts(bins=bins), 1) + + s1 = Series([1, 1, 2, 3]) + res1 = s1.value_counts(bins=1) + exp1 = Series({0.998: 4}) + tm.assert_series_equal(res1, exp1) + res1n = s1.value_counts(bins=1, normalize=True) + exp1n = Series({0.998: 1.0}) + tm.assert_series_equal(res1n, exp1n) + + self.assert_numpy_array_equal(s1.unique(), np.array([1, 2, 3])) + self.assertEquals(s1.nunique(), 3) + + res4 = s1.value_counts(bins=4) + exp4 = Series({0.998: 2, 1.5: 1, 2.0: 0, 2.5: 1}, index=[0.998, 2.5, 1.5, 2.0]) + tm.assert_series_equal(res4, exp4) + res4n = s1.value_counts(bins=4, normalize=True) + exp4n = Series({0.998: 0.5, 1.5: 0.25, 2.0: 0.0, 2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0]) + tm.assert_series_equal(res4n, exp4n) + + # handle NA's properly + s_values = ['a', 'b', 'b', 'b', np.nan, np.nan, 'd', 'd', 'a', 'a', 'b'] + s = klass(s_values) + expected = Series([4, 3, 2], index=['b', 'a', 'd']) + tm.assert_series_equal(s.value_counts(), expected) + + self.assert_numpy_array_equal(s.unique(), np.array(['a', 'b', np.nan, 'd'], dtype='O')) + self.assertEquals(s.nunique(), 3) + + s = klass({}) + expected = Series([], dtype=np.int64) + tm.assert_series_equal(s.value_counts(), expected) + self.assert_numpy_array_equal(s.unique(), np.array([])) + self.assertEquals(s.nunique(), 0) + + # GH 3002, datetime64[ns] + txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM', 'xxyyzz20100101EGG', + 'xxyyww20090101EGG', 'foofoo20080909PIE', 'foofoo20080909GUM']) + f = StringIO(txt) + df = pd.read_fwf(f, widths=[6, 8, 3], names=["person_id", "dt", "food"], + parse_dates=["dt"]) + + s = klass(df['dt'].copy()) + + idx = pd.to_datetime(['2010-01-01 00:00:00Z', '2008-09-09 00:00:00Z', '2009-01-01 00:00:00X']) + expected_s = Series([3, 2, 1], index=idx) + tm.assert_series_equal(s.value_counts(), 
expected_s) + + expected = np.array(['2010-01-01 00:00:00Z', '2009-01-01 00:00:00Z', '2008-09-09 00:00:00Z'], + dtype='datetime64[ns]') + if isinstance(s, DatetimeIndex): + expected = DatetimeIndex(expected) + self.assert_(s.unique().equals(expected)) + else: + self.assert_numpy_array_equal(s.unique(), expected) + + self.assertEquals(s.nunique(), 3) + + # with NaT + s = df['dt'].copy() + s = klass([v for v in s.values] + [pd.NaT]) + + result = s.value_counts() + self.assertEqual(result.index.dtype, 'datetime64[ns]') + expected_s[pd.NaT] = 1 + tm.assert_series_equal(result, expected_s) + + unique = s.unique() + self.assertEqual(unique.dtype, 'datetime64[ns]') + # numpy_array_equal cannot compare pd.NaT + self.assert_numpy_array_equal(unique[:3], expected) + self.assertTrue(unique[3] is pd.NaT or unique[3].astype('int64') == pd.tslib.iNaT) + + self.assertEquals(s.nunique(), 4) + + # timedelta64[ns] + td = df.dt - df.dt + timedelta(1) + td = klass(td) + + result = td.value_counts() + expected_s = Series([6], index=[86400000000000]) + self.assertEqual(result.index.dtype, 'int64') + tm.assert_series_equal(result, expected_s) + + # get nanoseconds to compare + expected = np.array([86400000000000]) + self.assert_numpy_array_equal(td.unique(), expected) + self.assertEquals(td.nunique(), 1) + + td2 = timedelta(1) + (df.dt - df.dt) + td2 = klass(td2) + result2 = td2.value_counts() + + self.assertEqual(result2.index.dtype, 'int64') + tm.assert_series_equal(result2, expected_s) + + self.assert_numpy_array_equal(td.unique(), expected) + self.assertEquals(td.nunique(), 1) + class TestDatetimeIndexOps(Ops): _allowed = '_allow_datetime_index_ops' diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 143e47baab465..8680446241659 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -3805,84 +3805,7 @@ def test_dot(self): self.assertRaises(ValueError, a.dot, b.T) def test_value_counts_nunique(self): - s = Series(['a', 'b', 'b', 'b', 
'b', 'a', 'c', 'd', 'd', 'a']) - hist = s.value_counts() - expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c']) - assert_series_equal(hist, expected) - - # don't sort, have to sort after the fact as not sorting is platform-dep - hist = s.value_counts(sort=False) - hist.sort() - expected = Series([3, 1, 4, 2], index=list('acbd')) - expected.sort() - assert_series_equal(hist, expected) - - # sort ascending - hist = s.value_counts(ascending=True) - expected = Series([1, 2, 3, 4], index=list('cdab')) - assert_series_equal(hist, expected) - - # relative histogram. - hist = s.value_counts(normalize=True) - expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c']) - assert_series_equal(hist, expected) - - self.assertEquals(s.nunique(), 4) - - # bins - self.assertRaises(TypeError, lambda bins: s.value_counts(bins=bins), 1) - - s1 = Series([1, 1, 2, 3]) - res1 = s1.value_counts(bins=1) - exp1 = Series({0.998: 4}) - assert_series_equal(res1, exp1) - res1n = s1.value_counts(bins=1, normalize=True) - exp1n = Series({0.998: 1.0}) - assert_series_equal(res1n, exp1n) - - res4 = s1.value_counts(bins=4) - exp4 = Series({0.998: 2, 1.5: 1, 2.0: 0, 2.5: 1}, index=[0.998, 2.5, 1.5, 2.0]) - assert_series_equal(res4, exp4) - res4n = s1.value_counts(bins=4, normalize=True) - exp4n = Series({0.998: 0.5, 1.5: 0.25, 2.0: 0.0, 2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0]) - assert_series_equal(res4n, exp4n) - - # handle NA's properly - s[5:7] = np.nan - hist = s.value_counts() - expected = s.dropna().value_counts() - assert_series_equal(hist, expected) - - s = Series({}) - hist = s.value_counts() - expected = Series([], dtype=np.int64) - assert_series_equal(hist, expected) - - # GH 3002, datetime64[ns] - import pandas as pd - f = StringIO( - "xxyyzz20100101PIE\nxxyyzz20100101GUM\nxxyyww20090101EGG\nfoofoo20080909PIE") - df = pd.read_fwf(f, widths=[6, 8, 3], names=[ - "person_id", "dt", "food"], parse_dates=["dt"]) - s = df.dt.copy() - result = s.value_counts() - 
self.assertEqual(result.index.dtype, 'datetime64[ns]') - - # with NaT - s = s.append(Series({4: pd.NaT})) - result = s.value_counts() - self.assertEqual(result.index.dtype, 'datetime64[ns]') - - # timedelta64[ns] - from datetime import timedelta - td = df.dt - df.dt + timedelta(1) - td2 = timedelta(1) + (df.dt - df.dt) - result = td.value_counts() - result2 = td2.value_counts() - #self.assertEqual(result.index.dtype, 'timedelta64[ns]') - self.assertEqual(result.index.dtype, 'int64') - self.assertEqual(result2.index.dtype, 'int64') - + # basics.rst doc example series = Series(np.random.randn(500)) series[20:500] = np.nan @@ -3909,25 +3832,7 @@ def test_unique(self): result = s.unique() self.assertEqual(len(result), 2) - # integers - s = Series(np.random.randint(0, 100, size=100)) - result = np.sort(s.unique()) - expected = np.unique(s.values) - self.assert_numpy_array_equal(result, expected) - - s = Series(np.random.randint(0, 100, size=100).astype(np.int32)) - result = np.sort(s.unique()) - expected = np.unique(s.values) - self.assert_numpy_array_equal(result, expected) - - # test string arrays for coverage - strings = np.tile(np.array([tm.rands(10) for _ in range(10)]), 10) - result = np.sort(nanops.unique1d(strings)) - expected = np.unique(strings) - self.assert_numpy_array_equal(result, expected) - # decision about None - s = Series([1, 2, 3, None, None, None], dtype=object) result = s.unique() expected = np.array([1, 2, 3, None], dtype=object) diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 4a4fbb146861d..9dc26f2b01ccc 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -1487,6 +1487,17 @@ def test_index_duplicate_periods(self): expected = ts[idx == 2007] assert_series_equal(result, expected) + def test_index_unique(self): + idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN') + expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN') + 
self.assert_numpy_array_equal(idx.unique(), expected.values) + self.assertEqual(idx.nunique(), 3) + + idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN', tz='US/Eastern') + expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN', tz='US/Eastern') + self.assert_numpy_array_equal(idx.unique(), expected.values) + self.assertEqual(idx.nunique(), 3) + def test_constructor(self): pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') assert_equal(len(pi), 9) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 64da6f76f3697..f7edd92fce122 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -77,7 +77,11 @@ def test_is_unique_monotonic(self): def test_index_unique(self): uniques = self.dups.index.unique() + expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3), + datetime(2000, 1, 4), datetime(2000, 1, 5)]) self.assertEqual(uniques.dtype, 'M8[ns]') # sanity + self.assert_(uniques.equals(expected)) + self.assertEqual(self.dups.index.nunique(), 4) # #2563 self.assertTrue(isinstance(uniques, DatetimeIndex)) @@ -85,8 +89,21 @@ def test_index_unique(self): dups_local = self.dups.index.tz_localize('US/Eastern') dups_local.name = 'foo' result = dups_local.unique() + expected = DatetimeIndex(expected, tz='US/Eastern') self.assertTrue(result.tz is not None) self.assertEquals(result.name, 'foo') + self.assert_(result.equals(expected)) + + # NaT + arr = [ 1370745748 + t for t in range(20) ] + [iNaT] + idx = DatetimeIndex(arr * 3) + self.assert_(idx.unique().equals(DatetimeIndex(arr))) + self.assertEqual(idx.nunique(), 21) + + arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT] + idx = DatetimeIndex(arr * 3) + self.assert_(idx.unique().equals(DatetimeIndex(arr))) + self.assertEqual(idx.nunique(), 21) def test_index_dupes_contains(self): d = datetime(2011, 12, 5, 20, 30)
Added `nunique` function to `Index` same as `Series`.
https://api.github.com/repos/pandas-dev/pandas/pulls/6734
2014-03-29T09:40:22Z
2014-04-06T14:21:20Z
2014-04-06T14:21:20Z
2014-06-13T16:59:22Z
BUG: Bug in resample when how=None resample freq is the same as the axis freq (GH5955)
diff --git a/doc/source/release.rst b/doc/source/release.rst index dcbf8b8c7f271..3d4ff0610f43f 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -291,6 +291,7 @@ Bug Fixes - Bug in ``DataFrame.to_stata`` which incorrectly handles nan values and ignores 'with_index' keyword argument (:issue:`6685`) - Bug in resample with extra bins when using an evenly divisible frequency (:issue:`4076`) - Bug in consistency of groupby aggregation when passing a custom function (:issue:`6715`) +- Bug in resample when ``how=None`` resample freq is the same as the axis frequency (:issue:`5955`) pandas 0.13.1 ------------- diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index 8b65882fb1279..7f243c20fe56e 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -246,7 +246,9 @@ def _resample_timestamps(self): # Determine if we're downsampling if axlabels.freq is not None or axlabels.inferred_freq is not None: + if len(grouper.binlabels) < len(axlabels) or self.how is not None: + # downsample grouped = obj.groupby(grouper, axis=self.axis) result = grouped.aggregate(self._agg_method) else: @@ -259,8 +261,15 @@ def _resample_timestamps(self): else: res_index = binner[:-1] - result = obj.reindex(res_index, method=self.fill_method, - limit=self.limit) + # if we have the same frequency as our axis, then we are equal sampling + # even if how is None + if self.fill_method is None and self.limit is None and to_offset( + axlabels.inferred_freq) == self.freq: + result = obj.copy() + result.index = res_index + else: + result = obj.reindex(res_index, method=self.fill_method, + limit=self.limit) else: # Irregular data, have to use groupby grouped = obj.groupby(grouper, axis=self.axis) diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 55d96ec6fbaeb..5f975105cd80e 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -160,6 +160,32 @@ def 
test_resample_basic_from_daily(self): self.assertEquals(result.irow(5), s['1/9/2005']) self.assertEqual(result.index.name, 'index') + def test_resample_upsampling_picked_but_not_correct(self): + + # Test for issue #3020 + dates = date_range('01-Jan-2014','05-Jan-2014', freq='D') + series = Series(1, index=dates) + + result = series.resample('D') + self.assertEquals(result.index[0], dates[0]) + + # GH 5955 + # incorrect deciding to upsample when the axis frequency matches the resample frequency + + import datetime + s = Series(np.arange(1.,6),index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)]) + expected = Series(np.arange(1.,6),index=date_range('19750101',periods=5,freq='D')) + + result = s.resample('D',how='count') + assert_series_equal(result,Series(1,index=expected.index)) + + result1 = s.resample('D',how='sum') + result2 = s.resample('D',how='mean') + result3 = s.resample('D') + assert_series_equal(result1,expected) + assert_series_equal(result2,expected) + assert_series_equal(result3,expected) + def test_resample_frame_basic(self): df = tm.makeTimeDataFrame() @@ -1078,15 +1104,6 @@ def test_all_values_single_bin(self): result = s.resample("A", how='mean') tm.assert_almost_equal(result[0], s.mean()) - def test_resample_doesnt_truncate(self): - # Test for issue #3020 - import pandas as pd - dates = pd.date_range('01-Jan-2014','05-Jan-2014', freq='D') - series = Series(1, index=dates) - - result = series.resample('D') - self.assertEquals(result.index[0], dates[0]) - def test_evenly_divisible_with_no_extra_bins(self): # 4076 # when the frequency is evenly divisible, sometimes extra bins
closes #5955
https://api.github.com/repos/pandas-dev/pandas/pulls/6731
2014-03-29T00:41:15Z
2014-03-29T01:53:36Z
2014-03-29T01:53:36Z
2014-07-16T08:59:36Z
BUG: bug in BlockManager._get_numeric_data, with invalid combine
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index fe5ae48fea281..d32664559f7fc 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1907,7 +1907,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None): values = self.values if inplace else self.values.copy() return [self.make_block(values.get_values(value), fill_value=value)] - + def shift(self, periods, axis=0): """ shift the block by periods """ N = len(self.values.T) @@ -2674,18 +2674,17 @@ def get_data(self, copy=False, columns=None, **kwargs): if len(blocks) == 0: return self.make_empty() - return self.combine(blocks) + return self.combine(blocks, copy=copy) - def combine(self, blocks): + def combine(self, blocks, copy=True): """ return a new manager with the blocks """ indexer = np.sort(np.concatenate([b.ref_locs for b in blocks])) new_items = self.items.take(indexer) new_blocks = [] for b in blocks: - b = b.copy(deep=False) - b.ref_items = new_items - new_blocks.append(b) + b = b.reindex_items_from(new_items, copy=copy) + new_blocks.extend(_valid_blocks(b)) new_axes = list(self.axes) new_axes[0] = new_items return self.__class__(new_blocks, new_axes, do_integrity_check=False) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index a7270dc4517b7..1bbcba0e4caad 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -10424,6 +10424,16 @@ def test_get_numeric_data(self): expected = df.ix[:, []] assert_frame_equal(result, expected) + df = DataFrame.from_dict({'a':[1,2], 'b':['foo','bar'],'c':[np.pi,np.e]}) + result = df._get_numeric_data() + expected = DataFrame.from_dict({'a':[1,2], 'c':[np.pi,np.e]}) + assert_frame_equal(result, expected) + + df = result.copy() + result = df._get_numeric_data() + expected = df + assert_frame_equal(result, expected) + def test_bool_describe_in_mixed_frame(self): df = DataFrame({ 'string_data': ['a', 'b', 'c', 'd', 'e'], @@ -10937,7 +10947,7 @@ def test_rank2(self): expected = 
DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0 result = df.rank(1, pct=True) assert_frame_equal(result, expected) - + df = DataFrame([[1, 3, 2], [1, 2, 3]]) expected = df.rank(0) / 2.0 result = df.rank(0, pct=True) @@ -10950,7 +10960,7 @@ def test_rank2(self): result = df.rank(1, numeric_only=False) assert_frame_equal(result, expected) - + expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]]) result = df.rank(0, numeric_only=False) assert_frame_equal(result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/6730
2014-03-28T20:26:01Z
2014-03-28T23:05:30Z
2014-03-28T23:05:30Z
2014-07-16T08:59:35Z
added percentage rank to DataFrame.Rank
diff --git a/doc/source/release.rst b/doc/source/release.rst index 47a2ef82c78dc..dcbf8b8c7f271 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -190,6 +190,7 @@ Improvements to existing features - implement joining a single-level indexed DataFrame on a matching column of a multi-indexed DataFrame (:issue:`3662`) - Performance improvement in indexing into a multi-indexed Series (:issue:`5567`) - Testing statements updated to use specialized asserts (:issue:`6175`) +- ``DataFrame.rank()`` now has a percentage rank option (:issue:`5971`) - ``Series.rank()`` now has a percentage rank option (:issue:`5971`) - ``Series.rank()`` and ``DataFrame.rank()`` now accept ``method='dense'`` for ranks without gaps (:issue:`6514`) - ``quotechar``, ``doublequote``, and ``escapechar`` can now be specified when diff --git a/pandas/algos.pyx b/pandas/algos.pyx index 14c9ec2f3355d..27e25c3954dad 100644 --- a/pandas/algos.pyx +++ b/pandas/algos.pyx @@ -283,7 +283,7 @@ def rank_1d_int64(object in_arr, ties_method='average', ascending=True, def rank_2d_float64(object in_arr, axis=0, ties_method='average', - ascending=True, na_option='keep'): + ascending=True, na_option='keep', pct=False): """ Fast NaN-friendly version of scipy.stats.rankdata """ @@ -296,6 +296,7 @@ def rank_2d_float64(object in_arr, axis=0, ties_method='average', float64_t sum_ranks = 0 int tiebreak = 0 bint keep_na = 0 + float count = 0.0 tiebreak = tiebreakers[ties_method] @@ -335,6 +336,7 @@ def rank_2d_float64(object in_arr, axis=0, ties_method='average', for i in range(n): dups = sum_ranks = 0 total_tie_count = 0 + count = 0.0 for j in range(k): sum_ranks += j + 1 dups += 1 @@ -342,6 +344,7 @@ def rank_2d_float64(object in_arr, axis=0, ties_method='average', if val == nan_value and keep_na: ranks[i, argsorted[i, j]] = nan continue + count += 1.0 if j == k - 1 or fabs(values[i, j + 1] - val) > FP_ERR: if tiebreak == TIEBREAK_AVERAGE: for z in range(j - dups + 1, j + 1): @@ -363,7 +366,8 @@ def 
rank_2d_float64(object in_arr, axis=0, ties_method='average', for z in range(j - dups + 1, j + 1): ranks[i, argsorted[i, z]] = total_tie_count sum_ranks = dups = 0 - + if pct: + ranks[i, :] /= count if axis == 0: return ranks.T else: @@ -371,7 +375,7 @@ def rank_2d_float64(object in_arr, axis=0, ties_method='average', def rank_2d_int64(object in_arr, axis=0, ties_method='average', - ascending=True, na_option='keep'): + ascending=True, na_option='keep', pct=False): """ Fast NaN-friendly version of scipy.stats.rankdata """ @@ -384,6 +388,7 @@ def rank_2d_int64(object in_arr, axis=0, ties_method='average', int64_t val float64_t sum_ranks = 0 int tiebreak = 0 + float count = 0.0 tiebreak = tiebreakers[ties_method] if axis == 0: @@ -411,10 +416,12 @@ def rank_2d_int64(object in_arr, axis=0, ties_method='average', for i in range(n): dups = sum_ranks = 0 total_tie_count = 0 + count = 0.0 for j in range(k): sum_ranks += j + 1 dups += 1 val = values[i, j] + count += 1.0 if j == k - 1 or fabs(values[i, j + 1] - val) > FP_ERR: if tiebreak == TIEBREAK_AVERAGE: for z in range(j - dups + 1, j + 1): @@ -436,7 +443,8 @@ def rank_2d_int64(object in_arr, axis=0, ties_method='average', for z in range(j - dups + 1, j + 1): ranks[i, argsorted[i, z]] = total_tie_count sum_ranks = dups = 0 - + if pct: + ranks[i, :] /= count if axis == 0: return ranks.T else: @@ -528,7 +536,7 @@ def rank_1d_generic(object in_arr, bint retry=1, ties_method='average', ranks[argsorted[j]] = total_tie_count sum_ranks = dups = 0 if pct: - ranks / count + return ranks / count else: return ranks @@ -562,7 +570,7 @@ class NegInfinity(object): __cmp__ = _return_true def rank_2d_generic(object in_arr, axis=0, ties_method='average', - ascending=True, na_option='keep'): + ascending=True, na_option='keep', pct=False): """ Fast NaN-friendly version of scipy.stats.rankdata """ @@ -577,6 +585,7 @@ def rank_2d_generic(object in_arr, axis=0, ties_method='average', float64_t sum_ranks = 0 int tiebreak = 0 bint keep_na = 0 + 
float count = 0.0 tiebreak = tiebreakers[ties_method] @@ -611,7 +620,8 @@ def rank_2d_generic(object in_arr, axis=0, ties_method='average', for i in range(len(values)): ranks[i] = rank_1d_generic(in_arr[i], ties_method=ties_method, - ascending=ascending) + ascending=ascending, + pct=pct) if axis == 0: return ranks.T else: @@ -626,12 +636,14 @@ def rank_2d_generic(object in_arr, axis=0, ties_method='average', for i in range(n): dups = sum_ranks = infs = 0 total_tie_count = 0 + count = 0.0 for j in range(k): val = values[i, j] if val is nan_value and keep_na: ranks[i, argsorted[i, j]] = nan infs += 1 continue + count += 1.0 sum_ranks += (j - infs) + 1 dups += 1 if j == k - 1 or are_diff(values[i, j + 1], val): @@ -652,7 +664,8 @@ def rank_2d_generic(object in_arr, axis=0, ties_method='average', for z in range(j - dups + 1, j + 1): ranks[i, argsorted[i, z]] = total_tie_count sum_ranks = dups = 0 - + if pct: + ranks[i, :] /= count if axis == 0: return ranks.T else: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index f20c316393244..e2ef178c62e71 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -277,7 +277,7 @@ def rank(values, axis=0, method='average', na_option='keep', elif values.ndim == 2: f, values = _get_data_algo(values, _rank2d_functions) ranks = f(values, axis=axis, ties_method=method, - ascending=ascending, na_option=na_option) + ascending=ascending, na_option=na_option, pct=pct) return ranks diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 430b309260f8c..5ecdd4d8b351d 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4182,7 +4182,7 @@ def f(arr): return data.apply(f, axis=axis) def rank(self, axis=0, numeric_only=None, method='average', - na_option='keep', ascending=True): + na_option='keep', ascending=True, pct=False): """ Compute numerical data ranks (1 through n) along axis. 
Equal values are assigned a rank that is the average of the ranks of those values @@ -4205,6 +4205,8 @@ def rank(self, axis=0, numeric_only=None, method='average', * bottom: smallest rank if descending ascending : boolean, default True False for ranks by high (1) to low (N) + pct : boolean, default False + Computes percentage rank of data Returns ------- @@ -4214,18 +4216,18 @@ def rank(self, axis=0, numeric_only=None, method='average', if numeric_only is None: try: ranks = algos.rank(self.values, axis=axis, method=method, - ascending=ascending, na_option=na_option) + ascending=ascending, na_option=na_option, + pct=pct) return self._constructor(ranks, index=self.index, columns=self.columns) except TypeError: numeric_only = True - if numeric_only: data = self._get_numeric_data() else: data = self ranks = algos.rank(data.values, axis=axis, method=method, - ascending=ascending, na_option=na_option) + ascending=ascending, na_option=na_option, pct=pct) return self._constructor(ranks, index=data.index, columns=data.columns) def to_timestamp(self, freq=None, how='start', axis=0, copy=True): diff --git a/pandas/core/series.py b/pandas/core/series.py index c3300e7b35a8b..47721ab371c3b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1732,7 +1732,7 @@ def rank(self, method='average', na_option='keep', ascending=True, keep: leave NA values where they are ascending : boolean, default True False for ranks by high (1) to low (N) - pct : boolean, defeault False + pct : boolean, default False Computes percentage rank of data Returns diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index d93232c18ee31..a7270dc4517b7 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -10933,12 +10933,24 @@ def test_rank(self): def test_rank2(self): from datetime import datetime + df = DataFrame([[1, 3, 2], [1, 2, 3]]) + expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0 + result = df.rank(1, pct=True) + assert_frame_equal(result, 
expected) + + df = DataFrame([[1, 3, 2], [1, 2, 3]]) + expected = df.rank(0) / 2.0 + result = df.rank(0, pct=True) + assert_frame_equal(result, expected) + + df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']]) expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]]) result = df.rank(1, numeric_only=False) assert_frame_equal(result, expected) + expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]]) result = df.rank(0, numeric_only=False) assert_frame_equal(result, expected)
closes #6717 @jreback asked for DataFrame rank to have a percentage option to match consistency with series
https://api.github.com/repos/pandas-dev/pandas/pulls/6728
2014-03-28T14:42:29Z
2014-03-28T15:17:31Z
2014-03-28T15:17:31Z
2014-07-16T08:59:31Z
Shift named axis
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6c57a9ce5beaa..67c58b0c0e6f4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3232,6 +3232,7 @@ def shift(self, periods=1, freq=None, axis=0, **kwds): if periods == 0: return self + axis = self._get_axis_number(axis) if freq is None and not len(kwds): new_data = self._data.shift(periods=periods, axis=axis) else: diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index c1862c4ff91ab..d93232c18ee31 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -5439,13 +5439,13 @@ def test_to_csv_deprecated_options(self): self.tsframe.to_csv(path, nanRep='foo') recons = read_csv(path,index_col=0,parse_dates=[0],na_values=['foo']) assert_frame_equal(self.tsframe, recons) - + with tm.assert_produces_warning(FutureWarning): self.frame.to_csv(path, cols=['A', 'B']) with tm.assert_produces_warning(False): self.frame.to_csv(path, columns=['A', 'B']) - + def test_to_csv_from_csv(self): @@ -9194,6 +9194,12 @@ def test_shift(self): result = df.shift(1,axis=1) assert_frame_equal(result,expected) + # shift named axis + df = DataFrame(np.random.rand(10,5)) + expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1) + result = df.shift(1,axis='columns') + assert_frame_equal(result,expected) + def test_shift_bool(self): df = DataFrame({'high': [True, False], 'low': [False, False]}) @@ -9827,7 +9833,7 @@ def test_sort_nan(self): df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4], 'B': [9, nan, 5, 2, 5, 4, 5]}, index = [1, 2, 3, 4, 5, 6, nan]) - + # NaN label, ascending=True, na_position='last' sorted_df = df.sort(kind='quicksort', ascending=True, na_position='last') expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4], @@ -9884,7 +9890,7 @@ def test_stable_descending_multicolumn_sort(self): sorted_df = df.sort(['A','B'], ascending=[0,0], na_position='first', kind='mergesort') assert_frame_equal(sorted_df, expected) - + def 
test_sort_index_multicolumn(self): import random A = np.arange(5).repeat(20)
simple fix for #6724. was blocking the panel general shift PR.
https://api.github.com/repos/pandas-dev/pandas/pulls/6725
2014-03-28T01:09:06Z
2014-03-28T03:39:43Z
2014-03-28T03:39:43Z
2014-07-16T08:59:30Z
BUG: MPLPlot cannot make loglog keyword worked
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 7b37cf09d5638..a64f24a61db9b 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -409,6 +409,22 @@ def test_plot_xy(self): # columns.inferred_type == 'mixed' # TODO add MultiIndex test + @slow + def test_logscales(self): + df = DataFrame({'a': np.arange(100)}, + index=np.arange(100)) + ax = df.plot(logy=True) + self.assertEqual(ax.xaxis.get_scale(), 'linear') + self.assertEqual(ax.yaxis.get_scale(), 'log') + + ax = df.plot(logx=True) + self.assertEqual(ax.xaxis.get_scale(), 'log') + self.assertEqual(ax.yaxis.get_scale(), 'linear') + + ax = df.plot(loglog=True) + self.assertEqual(ax.xaxis.get_scale(), 'log') + self.assertEqual(ax.yaxis.get_scale(), 'log') + @slow def test_xcompat(self): import pandas as pd @@ -1229,6 +1245,7 @@ def test_errorbar_plot(self): # check line plots _check_plot_works(df.plot, yerr=df_err, logy=True) _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True) + _check_plot_works(df.plot, yerr=df_err, loglog=True) kinds = ['line', 'bar', 'barh'] for kind in kinds: diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 2b73ae77970bf..8fdd6087bfbb3 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -976,9 +976,9 @@ def _setup_subplots(self): axes = [ax] - if self.logx: + if self.logx or self.loglog: [a.set_xscale('log') for a in axes] - if self.logy: + if self.logy or self.loglog: [a.set_yscale('log') for a in axes] self.fig = fig @@ -1879,9 +1879,11 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, scatter: scatter plot hexbin: hexbin plot logx : boolean, default False - For line plots, use log scaling on x axis + Use log scaling on x axis logy : boolean, default False - For line plots, use log scaling on y axis + Use log scaling on y axis + loglog : boolean, default False + Use log scaling on both x and y axes xticks : sequence Values to use for the xticks yticks : 
sequence @@ -2031,9 +2033,11 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None, grid : matplotlib grid legend: matplotlib legend logx : boolean, default False - For line plots, use log scaling on x axis + Use log scaling on x axis logy : boolean, default False - For line plots, use log scaling on y axis + Use log scaling on y axis + loglog : boolean, default False + Use log scaling on both x and y axes secondary_y : boolean or sequence of ints, default False If True then y-axis will be on the right figsize : a tuple (width, height) in inches
Changes made in #5638 make `loglog` keyword not worked, and it fixes the problem.
https://api.github.com/repos/pandas-dev/pandas/pulls/6722
2014-03-27T20:30:06Z
2014-03-30T14:42:26Z
2014-03-30T14:42:26Z
2014-07-09T08:24:18Z
BUG: Bug in consistency of groupby aggregation when passing a custom function (GH6715)
diff --git a/doc/source/release.rst b/doc/source/release.rst index ea5af9165b483..47a2ef82c78dc 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -139,11 +139,11 @@ API Changes - Following keywords are now acceptable for :meth:`DataFrame.plot(kind='bar')` and :meth:`DataFrame.plot(kind='barh')`. - `width`: Specify the bar width. In previous versions, static value 0.5 was passed to matplotlib and it cannot be overwritten. (:issue:`6604`) - + - `align`: Specify the bar alignment. Default is `center` (different from matplotlib). In previous versions, pandas passes `align='edge'` to matplotlib and adjust the location to `center` by itself, and it results `align` keyword is not applied as expected. (:issue:`4525`) - + - `position`: Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1(right/top-end). Default is 0.5 (center). (:issue:`6604`) - + - Define and document the order of column vs index names in query/eval (:issue:`6676`) @@ -289,6 +289,7 @@ Bug Fixes - Bug in binary operations with a rhs of a Series not aligning (:issue:`6681`) - Bug in ``DataFrame.to_stata`` which incorrectly handles nan values and ignores 'with_index' keyword argument (:issue:`6685`) - Bug in resample with extra bins when using an evenly divisible frequency (:issue:`4076`) +- Bug in consistency of groupby aggregation when passing a custom function (:issue:`6715`) pandas 0.13.1 ------------- diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 208f9f1a8e19a..996a691eca082 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1081,10 +1081,13 @@ def apply(self, f, data, axis=0): try: values, mutated = splitter.fast_apply(f, group_keys) return group_keys, values, mutated - except Exception: + except (lib.InvalidApply): # we detect a mutation of some kind # so take slow path pass + except (Exception) as e: + # raise this error to the caller + pass result_values = [] for key, (i, group) in zip(group_keys, splitter): @@ -2295,7 
+2298,15 @@ def aggregate(self, arg, *args, **kwargs): if self.grouper.nkeys > 1: return self._python_agg_general(arg, *args, **kwargs) else: - result = self._aggregate_generic(arg, *args, **kwargs) + + # try to treat as if we are passing a list + try: + assert not args and not kwargs + result = self._aggregate_multiple_funcs([arg]) + result.columns = Index(result.columns.levels[0], + name=self._selected_obj.columns.name) + except: + result = self._aggregate_generic(arg, *args, **kwargs) if not self.as_index: if isinstance(result.index, MultiIndex): diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index b14c355f44a1c..79eac770f547e 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -1946,6 +1946,29 @@ def test_grouping_ndarray(self): expected = self.df.groupby('A').sum() assert_frame_equal(result, expected, check_names=False) # Note: no names when grouping by value + def test_agg_consistency(self): + # agg with ([]) and () not consistent + # GH 6715 + + def P1(a): + try: + return np.percentile(a.dropna(), q=1) + except: + return np.nan + + import datetime as dt + df = DataFrame({'col1':[1,2,3,4], + 'col2':[10,25,26,31], + 'date':[dt.date(2013,2,10),dt.date(2013,2,10),dt.date(2013,2,11),dt.date(2013,2,11)]}) + + g = df.groupby('date') + + expected = g.agg([P1]) + expected.columns = expected.columns.levels[0] + + result = g.agg(P1) + assert_frame_equal(result, expected) + def test_apply_typecast_fail(self): df = DataFrame({'d': [1., 1., 1., 2., 2., 2.], 'c': np.tile(['a', 'b', 'c'], 2),
closes #6715
https://api.github.com/repos/pandas-dev/pandas/pulls/6718
2014-03-27T13:18:58Z
2014-03-28T12:34:13Z
2014-03-28T12:34:13Z
2014-07-16T08:59:25Z
BUG: Allow mapping as parameters for SQL DBAPI2
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index fa89cf488125a..ac90555526a5e 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -31,7 +31,10 @@ def _convert_params(sql, params): """convert sql and params args to DBAPI2.0 compliant format""" args = [sql] if params is not None: - args += list(params) + if hasattr(params, 'keys'): # test if params is a mapping + args += [params] + else: + args += [list(params)] return args @@ -200,7 +203,7 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets cur : depreciated, cursor is obtained from connection - params : list or tuple, optional + params : list, tuple or dict, optional List of parameters to pass to execute method. parse_dates : list or dict - List of column names to parse as dates diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 2f9323e50c9e2..80da7ae6bf391 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -95,6 +95,24 @@ INSERT INTO types_test_data VALUES(%s, %s, %s, %s, %s, %s, %s, %s) """ + }, + 'read_parameters': { + 'sqlite': "SELECT * FROM iris WHERE Name=? 
AND SepalLength=?", + 'mysql': 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s', + 'postgresql': 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s' + }, + 'read_named_parameters': { + 'sqlite': """ + SELECT * FROM iris WHERE Name=:name AND SepalLength=:length + """, + 'mysql': """ + SELECT * FROM iris WHERE + `Name`="%(name)s" AND `SepalLength`=%(length)s + """, + 'postgresql': """ + SELECT * FROM iris WHERE + "Name"=%(name)s AND "SepalLength"=%(length)s + """ } } @@ -168,6 +186,18 @@ def _read_sql_iris(self): iris_frame = self.pandasSQL.read_sql("SELECT * FROM iris") self._check_iris_loaded_frame(iris_frame) + def _read_sql_iris_parameter(self): + query = SQL_STRINGS['read_parameters'][self.flavor] + params = ['Iris-setosa', 5.1] + iris_frame = self.pandasSQL.read_sql(query, params=params) + self._check_iris_loaded_frame(iris_frame) + + def _read_sql_iris_named_parameter(self): + query = SQL_STRINGS['read_named_parameters'][self.flavor] + params = {'name': 'Iris-setosa', 'length': 5.1} + iris_frame = self.pandasSQL.read_sql(query, params=params) + self._check_iris_loaded_frame(iris_frame) + def _to_sql(self): self.drop_table('test_frame1') @@ -491,6 +521,12 @@ class _TestSQLAlchemy(PandasSQLTest): def test_read_sql(self): self._read_sql_iris() + def test_read_sql_parameter(self): + self._read_sql_iris_parameter() + + def test_read_sql_named_parameter(self): + self._read_sql_iris_named_parameter() + def test_to_sql(self): self._to_sql() @@ -703,6 +739,12 @@ def test_invalid_flavor(self): def test_read_sql(self): self._read_sql_iris() + def test_read_sql_parameter(self): + self._read_sql_iris_parameter() + + def test_read_sql_named_parameter(self): + self._read_sql_iris_named_parameter() + def test_to_sql(self): self._to_sql()
According to the DBAPI2.0 the parameters of the execute method can be a list or a mapping. The code in the master branch assume that this parameter is a list which can break working code. That's a regression compared to the pandas 0.13.1 Closes #6708
https://api.github.com/repos/pandas-dev/pandas/pulls/6709
2014-03-26T02:34:29Z
2014-03-31T16:49:52Z
2014-03-31T16:49:52Z
2014-07-01T14:57:33Z
PERF: #6700 dataframe.from_records: read into list w/ itertools.islice when specifying nrows
diff --git a/doc/source/release.rst b/doc/source/release.rst index df0f472c390c7..be988236cb7f6 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -185,6 +185,8 @@ Improvements to existing features - Performance improvement when converting ``DatetimeIndex`` to floating ordinals using ``DatetimeConverter`` (:issue:`6636`) - Performance improvement for ``DataFrame.shift`` (:issue: `5609`) +- Performance improvement for ``DataFrame.from_records`` when reading a + specified number of rows from an iterable (:issue:`6700`) .. _release.bug_fixes-0.14.0: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8cf164ba76c21..d16b64d4d693d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -12,10 +12,11 @@ # pylint: disable=E1101,E1103 # pylint: disable=W0212,W0231,W0703,W0622 -import sys import collections -import warnings +import itertools +import sys import types +import warnings from numpy import nan as NA import numpy as np @@ -756,17 +757,10 @@ def from_records(cls, data, index=None, exclude=None, columns=None, values = [first_row] - # if unknown length iterable (generator) if nrows is None: - # consume whole generator - values += list(data) + values += data else: - i = 1 - for row in data: - values.append(row) - i += 1 - if i >= nrows: - break + values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index 7f9063003191f..db53f00c576d6 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -386,7 +386,7 @@ def test_equal(name): def test_unequal(name): df, df2 = pairs[name] return df.equals(df2) - + float_df = DataFrame(np.random.randn(1000, 1000)) object_df = DataFrame([['foo']*1000]*1000) nonunique_cols = object_df.copy() @@ -441,4 +441,23 @@ def test_unequal(name): start_date=datetime(2014,1,1)) frame_shift_axis1 = Benchmark('df.shift(1,axis=1)', setup, name = 'frame_shift_axis_1', - 
start_date=datetime(2014,1,1)) \ No newline at end of file + start_date=datetime(2014,1,1)) + + +#----------------------------------------------------------------------------- +# from_records issue-6700 + +setup = common_setup + """ +def get_data(n=100000): + return ((x, x*20, x*100) for x in xrange(n)) +""" + +frame_from_records_generator = Benchmark('df = DataFrame.from_records(get_data())', + setup, + name='frame_from_records_generator', + start_date=datetime(2013,10,04)) # issue-4911 + +frame_from_records_generator_nrows = Benchmark('df = DataFrame.from_records(get_data(), nrows=1000)', + setup, + name='frame_from_records_generator_nrows', + start_date=datetime(2013,10,04)) # issue-4911
https://api.github.com/repos/pandas-dev/pandas/pulls/6702
2014-03-25T12:34:11Z
2014-04-06T15:46:48Z
2014-04-06T15:46:48Z
2014-06-27T23:41:03Z
ENH/VIS: Dataframe bar plot can now handle align keyword properly
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6d7751266036b..5134130ba7865 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -135,10 +135,16 @@ API Changes the index, rather than requiring a list of tuple (:issue:`4370`) - Fix a bug where invalid eval/query operations would blow the stack (:issue:`5198`) + - Following keywords are now acceptable for :meth:`DataFrame.plot(kind='bar')` and :meth:`DataFrame.plot(kind='barh')`. - - `width`: Specify the bar width. In previous versions, static value 0.5 was passed to matplotlib and it cannot be overwritten. + + - `width`: Specify the bar width. In previous versions, static value 0.5 was passed to matplotlib and it cannot be overwritten. (:issue:`6604`) + + - `align`: Specify the bar alignment. Default is `center` (different from matplotlib). In previous versions, pandas passes `align='edge'` to matplotlib and adjust the location to `center` by itself, and it results `align` keyword is not applied as expected. (:issue:`4525`) + - `position`: Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1(right/top-end). Default is 0.5 (center). (:issue:`6604`) - - Define and document the order of column vs index names in query/eval + +- Define and document the order of column vs index names in query/eval (:issue:`6676`) Deprecations diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 0b94b8e44a0dc..95537878871b1 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -174,9 +174,15 @@ These are out-of-bounds selections df_multi.set_index([df_multi.index, df_multi.index]) - Following keywords are now acceptable for :meth:`DataFrame.plot(kind='bar')` and :meth:`DataFrame.plot(kind='barh')`. - - `width`: Specify the bar width. In previous versions, static value 0.5 was passed to matplotlib and it cannot be overwritten. + + - `width`: Specify the bar width. 
In previous versions, static value 0.5 was passed to matplotlib and it cannot be overwritten. (:issue:`6604`) + + - `align`: Specify the bar alignment. Default is `center` (different from matplotlib). In previous versions, pandas passes `align='edge'` to matplotlib and adjust the location to `center` by itself, and it results `align` keyword is not applied as expected. (:issue:`4525`) + - `position`: Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1(right/top-end). Default is 0.5 (center). (:issue:`6604`) + Because of the default `align` value changes, coordinates of bar plots are now located on integer values (0.0, 1.0, 2.0 ...). This is intended to make bar plot be located on the same coodinates as line plot. However, bar plot may differs unexpectedly when you manually adjust the bar location or drawing area, such as using `set_xlim`, `set_ylim`, etc. In this cases, please modify your script to meet with new coordinates. + MultiIndexing Using Slicers ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index fd0463ccd7ba0..7b37cf09d5638 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -615,47 +615,12 @@ def test_bar_barwidth(self): @slow def test_bar_barwidth_position(self): df = DataFrame(randn(5, 5)) - - width = 0.9 - position = 0.2 - - # regular - ax = df.plot(kind='bar', width=width, position=position) - p = ax.patches[0] - self.assertEqual(ax.xaxis.get_ticklocs()[0], - p.get_x() + p.get_width() * position * len(df.columns)) - - # stacked - ax = df.plot(kind='bar', stacked=True, width=width, position=position) - p = ax.patches[0] - self.assertEqual(ax.xaxis.get_ticklocs()[0], - p.get_x() + p.get_width() * position) - - # horizontal regular - ax = df.plot(kind='barh', width=width, position=position) - p = ax.patches[0] - self.assertEqual(ax.yaxis.get_ticklocs()[0], - p.get_y() + p.get_height() * position * len(df.columns)) - - # horizontal stacked - 
ax = df.plot(kind='barh', stacked=True, width=width, position=position) - p = ax.patches[0] - self.assertEqual(ax.yaxis.get_ticklocs()[0], - p.get_y() + p.get_height() * position) - - # subplots - axes = df.plot(kind='bar', width=width, position=position, subplots=True) - for ax in axes: - p = ax.patches[0] - self.assertEqual(ax.xaxis.get_ticklocs()[0], - p.get_x() + p.get_width() * position) - - # horizontal subplots - axes = df.plot(kind='barh', width=width, position=position, subplots=True) - for ax in axes: - p = ax.patches[0] - self.assertEqual(ax.yaxis.get_ticklocs()[0], - p.get_y() + p.get_height() * position) + self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9, position=0.2) + self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9, position=0.2) + self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9, position=0.2) + self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9, position=0.2) + self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9, position=0.2) + self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9, position=0.2) @slow def test_plot_scatter(self): @@ -692,68 +657,144 @@ def test_plot_bar(self): df = DataFrame({'a': [0, 1], 'b': [1, 0]}) _check_plot_works(df.plot, kind='bar') + def _check_bar_alignment(self, df, kind='bar', stacked=False, + subplots=False, align='center', + width=0.5, position=0.5): + + axes = df.plot(kind=kind, stacked=stacked, subplots=subplots, + align=align, width=width, position=position, + grid=True) + + tick_pos = np.arange(len(df)) + + if not isinstance(axes, np.ndarray): + axes = [axes] + + for ax in axes: + if kind == 'bar': + axis = ax.xaxis + ax_min, ax_max = ax.get_xlim() + elif kind == 'barh': + axis = ax.yaxis + ax_min, ax_max = ax.get_ylim() + else: + raise ValueError + + p = ax.patches[0] + if kind == 'bar' and (stacked is True or subplots is True): + edge = p.get_x() + center = edge + p.get_width() * position + tickoffset = width * 
position + elif kind == 'bar' and stacked is False: + center = p.get_x() + p.get_width() * len(df.columns) * position + edge = p.get_x() + if align == 'edge': + tickoffset = width * (position - 0.5) + p.get_width() * 1.5 + else: + tickoffset = width * position + p.get_width() + elif kind == 'barh' and (stacked is True or subplots is True): + center = p.get_y() + p.get_height() * position + edge = p.get_y() + tickoffset = width * position + elif kind == 'barh' and stacked is False: + center = p.get_y() + p.get_height() * len(df.columns) * position + edge = p.get_y() + if align == 'edge': + tickoffset = width * (position - 0.5) + p.get_height() * 1.5 + else: + tickoffset = width * position + p.get_height() + else: + raise ValueError + + # Check the ticks locates on integer + self.assertTrue((axis.get_ticklocs() == np.arange(len(df))).all()) + + if align == 'center': + # Check whether the bar locates on center + self.assertAlmostEqual(axis.get_ticklocs()[0], center) + elif align == 'edge': + # Check whether the bar's edge starts from the tick + self.assertAlmostEqual(axis.get_ticklocs()[0], edge) + else: + raise ValueError + + # Check starting point and axes limit margin + self.assertEqual(ax_min, tick_pos[0] - tickoffset - 0.25) + self.assertEqual(ax_max, tick_pos[-1] - tickoffset + 1) + # Check tick locations and axes limit margin + t_min = axis.get_ticklocs()[0] - tickoffset + t_max = axis.get_ticklocs()[-1] - tickoffset + self.assertAlmostEqual(ax_min, t_min - 0.25) + self.assertAlmostEqual(ax_max, t_max + 1.0) + return axes + + @slow def test_bar_stacked_center(self): # GH2157 df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5)) - ax = df.plot(kind='bar', stacked='True', grid=True) - self.assertEqual(ax.xaxis.get_ticklocs()[0], - ax.patches[0].get_x() + ax.patches[0].get_width() / 2) + axes = self._check_bar_alignment(df, kind='bar', stacked=True) + # Check the axes has the same drawing range before fixing # GH4525 + 
self.assertEqual(axes[0].get_xlim(), (-0.5, 4.75)) - ax = df.plot(kind='bar', stacked='True', width=0.9, grid=True) - self.assertEqual(ax.xaxis.get_ticklocs()[0], - ax.patches[0].get_x() + ax.patches[0].get_width() / 2) + self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9) - ax = df.plot(kind='barh', stacked='True', grid=True) - self.assertEqual(ax.yaxis.get_ticklocs()[0], - ax.patches[0].get_y() + ax.patches[0].get_height() / 2) + axes = self._check_bar_alignment(df, kind='barh', stacked=True) + self.assertEqual(axes[0].get_ylim(), (-0.5, 4.75)) - ax = df.plot(kind='barh', stacked='True', width=0.9, grid=True) - self.assertEqual(ax.yaxis.get_ticklocs()[0], - ax.patches[0].get_y() + ax.patches[0].get_height() / 2) + self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9) + @slow def test_bar_center(self): df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5)) - ax = df.plot(kind='bar', grid=True) - self.assertEqual(ax.xaxis.get_ticklocs()[0], - ax.patches[0].get_x() + ax.patches[0].get_width()) - - ax = df.plot(kind='bar', width=0.9, grid=True) - self.assertEqual(ax.xaxis.get_ticklocs()[0], - ax.patches[0].get_x() + ax.patches[0].get_width()) + axes = self._check_bar_alignment(df, kind='bar', stacked=False) + self.assertEqual(axes[0].get_xlim(), (-0.75, 4.5)) - ax = df.plot(kind='barh', grid=True) - self.assertEqual(ax.yaxis.get_ticklocs()[0], - ax.patches[0].get_y() + ax.patches[0].get_height()) - - ax = df.plot(kind='barh', width=0.9, grid=True) - self.assertEqual(ax.yaxis.get_ticklocs()[0], - ax.patches[0].get_y() + ax.patches[0].get_height()) + self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9) + + axes = self._check_bar_alignment(df, kind='barh', stacked=False) + self.assertEqual(axes[0].get_ylim(), (-0.75, 4.5)) + self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9) + + @slow def test_bar_subplots_center(self): df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5)) - axes = 
df.plot(kind='bar', grid=True, subplots=True) + axes = self._check_bar_alignment(df, kind='bar', subplots=True) for ax in axes: - for r in ax.patches: - self.assertEqual(ax.xaxis.get_ticklocs()[0], - ax.patches[0].get_x() + ax.patches[0].get_width() / 2) + self.assertEqual(ax.get_xlim(), (-0.5, 4.75)) - axes = df.plot(kind='bar', width=0.9, grid=True, subplots=True) + self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9) + + axes = self._check_bar_alignment(df, kind='barh', subplots=True) for ax in axes: - for r in ax.patches: - self.assertEqual(ax.xaxis.get_ticklocs()[0], - ax.patches[0].get_x() + ax.patches[0].get_width() / 2) + self.assertEqual(ax.get_ylim(), (-0.5, 4.75)) - axes = df.plot(kind='barh', grid=True, subplots=True) - for ax in axes: - for r in ax.patches: - self.assertEqual(ax.yaxis.get_ticklocs()[0], - ax.patches[0].get_y() + ax.patches[0].get_height() / 2) + self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9) - axes = df.plot(kind='barh', width=0.9, grid=True, subplots=True) - for ax in axes: - for r in ax.patches: - self.assertEqual(ax.yaxis.get_ticklocs()[0], - ax.patches[0].get_y() + ax.patches[0].get_height() / 2) + @slow + def test_bar_edge(self): + df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5)) + + self._check_bar_alignment(df, kind='bar', stacked=True, align='edge') + self._check_bar_alignment(df, kind='bar', stacked=True, + width=0.9, align='edge') + self._check_bar_alignment(df, kind='barh', stacked=True, align='edge') + self._check_bar_alignment(df, kind='barh', stacked=True, + width=0.9, align='edge') + + self._check_bar_alignment(df, kind='bar', stacked=False, align='edge') + self._check_bar_alignment(df, kind='bar', stacked=False, + width=0.9, align='edge') + self._check_bar_alignment(df, kind='barh', stacked=False, align='edge') + self._check_bar_alignment(df, kind='barh', stacked=False, + width=0.9, align='edge') + + self._check_bar_alignment(df, kind='bar', subplots=True, 
align='edge') + self._check_bar_alignment(df, kind='bar', subplots=True, + width=0.9, align='edge') + self._check_bar_alignment(df, kind='barh', subplots=True, align='edge') + self._check_bar_alignment(df, kind='barh', subplots=True, + width=0.9, align='edge') @slow def test_bar_log_no_subplots(self): diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 8ad17fd593871..2b73ae77970bf 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1673,18 +1673,26 @@ def __init__(self, data, **kwargs): self.mark_right = kwargs.pop('mark_right', True) self.stacked = kwargs.pop('stacked', False) - self.bar_width = kwargs.pop('width', 0.5) + self.bar_width = kwargs.pop('width', 0.5) pos = kwargs.pop('position', 0.5) - self.ax_pos = np.arange(len(data)) + self.bar_width * pos + kwargs['align'] = kwargs.pop('align', 'center') + self.tick_pos = np.arange(len(data)) + self.log = kwargs.pop('log',False) MPLPlot.__init__(self, data, **kwargs) if self.stacked or self.subplots: - self.tickoffset = self.bar_width * pos + self.tickoffset = self.bar_width * pos + elif kwargs['align'] == 'edge': + K = self.nseries + w = self.bar_width / K + self.tickoffset = self.bar_width * (pos - 0.5) + w * 1.5 else: K = self.nseries - self.tickoffset = self.bar_width * pos + self.bar_width / K + w = self.bar_width / K + self.tickoffset = self.bar_width * pos + w + self.ax_pos = self.tick_pos - self.tickoffset def _args_adjust(self): if self.rot is None: @@ -1751,19 +1759,21 @@ def _make_plot(self): start = 0 if mpl_le_1_2_1 else None if self.subplots: - rect = bar_f(ax, self.ax_pos, y, self.bar_width, + w = self.bar_width / 2 + rect = bar_f(ax, self.ax_pos + w, y, self.bar_width, start=start, **kwds) ax.set_title(label) elif self.stacked: mask = y > 0 start = np.where(mask, pos_prior, neg_prior) - rect = bar_f(ax, self.ax_pos, y, self.bar_width, start=start, - label=label, **kwds) + w = self.bar_width / 2 + rect = bar_f(ax, self.ax_pos + w, y, self.bar_width, + 
start=start, label=label, **kwds) pos_prior = pos_prior + np.where(mask, y, 0) neg_prior = neg_prior + np.where(mask, 0, y) else: w = self.bar_width / K - rect = bar_f(ax, self.ax_pos + (i + 1) * w, y, w, + rect = bar_f(ax, self.ax_pos + (i + 1.5) * w, y, w, start=start, label=label, **kwds) rects.append(rect) if self.mark_right: @@ -1789,22 +1799,24 @@ def _post_plot_logic(self): name = self._get_index_name() if self.kind == 'bar': ax.set_xlim([self.ax_pos[0] - 0.25, self.ax_pos[-1] + 1]) - ax.set_xticks(self.ax_pos + self.tickoffset) + ax.set_xticks(self.tick_pos) ax.set_xticklabels(str_index, rotation=self.rot, fontsize=self.fontsize) if not self.log: # GH3254+ ax.axhline(0, color='k', linestyle='--') if name is not None: ax.set_xlabel(name) - else: + elif self.kind == 'barh': # horizontal bars ax.set_ylim([self.ax_pos[0] - 0.25, self.ax_pos[-1] + 1]) - ax.set_yticks(self.ax_pos + self.tickoffset) + ax.set_yticks(self.tick_pos) ax.set_yticklabels(str_index, rotation=self.rot, fontsize=self.fontsize) ax.axvline(0, color='k', linestyle='--') if name is not None: ax.set_ylabel(name) + else: + raise NotImplementedError(self.kind) # if self.subplots and self.legend: # self.axes[0].legend(loc='best')
Closes #4525. I modified the `BarPlot` internal alignment to meet line coordinates. Previously, `BarPlot` doesn't pass `align` keyword to matplotlib (thus matplotlib uses `align='edge'`), but it adjusts bar locations to looks like `align='center'`. Now `BarPlot` pass `align=center` to matplotlib by default and ticks are being locates on integer value starts from 0 (0.0, 1.0 ...). Drawing bar and line on the same axes looks like below. _Output using current master:_ ![align_incorrect](https://f.cloud.github.com/assets/1696302/2492234/0df22b26-b21c-11e3-8a96-95cffa20a7c6.png) _Output after fix_ ![align_correct](https://f.cloud.github.com/assets/1696302/2492238/5f5bc26a-b21c-11e3-9b70-76892cea9eca.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/6691
2014-03-23T00:01:35Z
2014-03-26T20:50:44Z
2014-03-26T20:50:44Z
2014-06-17T15:37:54Z
BUG: Bug in resample with extra bins when using an evenly divisible freq (GH4076)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 55861a0f1b0f0..df0f472c390c7 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -185,7 +185,7 @@ Improvements to existing features - Performance improvement when converting ``DatetimeIndex`` to floating ordinals using ``DatetimeConverter`` (:issue:`6636`) - Performance improvement for ``DataFrame.shift`` (:issue: `5609`) - + .. _release.bug_fixes-0.14.0: Bug Fixes @@ -270,6 +270,7 @@ Bug Fixes - Bug in compat with ``np.compress``, surfaced in (:issue:`6658`) - Bug in binary operations with a rhs of a Series not aligning (:issue:`6681`) - Bug in ``DataFrame.to_stata`` which incorrectly handles nan values and ignores 'with_index' keyword argument (:issue:`6685`) +- Bug in resample with extra bins when using an evenly divisible frequency (:issue:`4076`) pandas 0.13.1 ------------- diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 15e6381cbe2fa..83964571fca8f 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1486,25 +1486,20 @@ def get_iterator(self, data, axis=0): Generator yielding sequence of (name, subsetted object) for each group """ - if axis == 0: - start = 0 - for edge, label in zip(self.bins, self.binlabels): - yield label, data[start:edge] - start = edge - - if start < len(data): - yield self.binlabels[-1], data[start:] + if isinstance(data, NDFrame): + slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis) + length = len(data.axes[axis]) else: - start = 0 - for edge, label in zip(self.bins, self.binlabels): - inds = lrange(start, edge) - yield label, data.take(inds, axis=axis) - start = edge - - n = len(data.axes[axis]) - if start < n: - inds = lrange(start, n) - yield self.binlabels[-1], data.take(inds, axis=axis) + slicer = lambda start,edge: data[slice(start,edge)] + length = len(data) + + start = 0 + for edge, label in zip(self.bins, self.binlabels): + yield label, slicer(start,edge) + start = edge + + if start < length: + 
yield self.binlabels[-1], slicer(start,None) def apply(self, f, data, axis=0): result_keys = [] diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 4d47750660800..506eb348a8113 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2864,7 +2864,8 @@ def test_groupby_with_timegrouper(self): df = df.set_index(['Date']) expected = DataFrame({ 'Quantity' : np.nan }, - index=date_range('20130901 13:00:00','20131205 13:00:00',freq='5D',name='Date')) + index=date_range('20130901 13:00:00','20131205 13:00:00', + freq='5D',name='Date',closed='left')) expected.iloc[[0,6,18],0] = np.array([24.,6.,9.],dtype='float64') result1 = df.resample('5D',how=sum) diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index 51144cb3bba2c..8b65882fb1279 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -185,6 +185,12 @@ def _get_time_bins(self, ax): elif not trimmed: labels = labels[:-1] + # if we end up with more labels than bins + # adjust the labels + # GH4076 + if len(bins) < len(labels): + labels = labels[:len(bins)] + return binner, bins, labels def _adjust_bin_edges(self, binner, ax_values): diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 242d656b8794f..55d96ec6fbaeb 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -1087,6 +1087,38 @@ def test_resample_doesnt_truncate(self): result = series.resample('D') self.assertEquals(result.index[0], dates[0]) + def test_evenly_divisible_with_no_extra_bins(self): + # 4076 + # when the frequency is evenly divisible, sometimes extra bins + + df = DataFrame(np.random.randn(9, 3), index=date_range('2000-1-1', periods=9)) + result = df.resample('5D') + expected = pd.concat([df.iloc[0:5].mean(),df.iloc[5:].mean()],axis=1).T + expected.index = [Timestamp('2000-1-1'),Timestamp('2000-1-6')] + assert_frame_equal(result,expected) + + index = 
date_range(start='2001-5-4', periods=28) + df = DataFrame( + [{'REST_KEY': 1, 'DLY_TRN_QT': 80, 'DLY_SLS_AMT': 90, + 'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 + + [{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10, + 'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28, + index=index.append(index)).sort() + + index = date_range('2001-5-4',periods=4,freq='7D') + expected = DataFrame( + [{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14, + 'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4, + index=index).unstack().swaplevel(1,0).sortlevel() + result = df.resample('7D', how='count') + assert_series_equal(result,expected) + + expected = DataFrame( + [{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700, + 'COOP_DLY_TRN_QT': 560, 'COOP_DLY_SLS_AMT': 280}] * 4, + index=index) + result = df.resample('7D', how='sum') + assert_frame_equal(result,expected) class TestTimeGrouper(tm.TestCase):
closes #4076 this only happened on the cythonized methods and not the python agg ones (this is why count worked, but sum did not)
https://api.github.com/repos/pandas-dev/pandas/pulls/6690
2014-03-22T23:25:25Z
2014-03-23T13:49:21Z
2014-03-23T13:49:21Z
2014-06-27T12:22:23Z
BUG: NaN values not converted to Stata missing values (GH6684)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 3937b4b30fa0e..55861a0f1b0f0 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -269,6 +269,7 @@ Bug Fixes - Bug in ``DataFrame.to_stata`` when columns have non-string names (:issue:`4558`) - Bug in compat with ``np.compress``, surfaced in (:issue:`6658`) - Bug in binary operations with a rhs of a Series not aligning (:issue:`6681`) +- Bug in ``DataFrame.to_stata`` which incorrectly handles nan values and ignores 'with_index' keyword argument (:issue:`6685`) pandas 0.13.1 ------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 253b9ac2c7a16..8cf164ba76c21 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1258,7 +1258,8 @@ def to_stata( from pandas.io.stata import StataWriter writer = StataWriter(fname, self, convert_dates=convert_dates, encoding=encoding, byteorder=byteorder, - time_stamp=time_stamp, data_label=data_label) + time_stamp=time_stamp, data_label=data_label, + write_index=write_index) writer.write_file() @Appender(fmt.docstring_to_string, indents=1) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 4bb61e385a75c..fd41961109511 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -990,8 +990,6 @@ def _dtype_to_stata_type(dtype): return chr(255) elif dtype == np.float32: return chr(254) - elif dtype == np.int64: - return chr(253) elif dtype == np.int32: return chr(253) elif dtype == np.int16: @@ -1025,8 +1023,6 @@ def _dtype_to_default_stata_fmt(dtype): return "%10.0g" elif dtype == np.float32: return "%9.0g" - elif dtype == np.int64: - return "%9.0g" elif dtype == np.int32: return "%12.0g" elif dtype == np.int8 or dtype == np.int16: @@ -1108,6 +1104,21 @@ def _write(self, to_write): self._file.write(to_write) + def _replace_nans(self, data): + # return data + """Checks floating point data columns for nans, and replaces these with + the generic Stata for missing value (.)""" + for c in data: + dtype = data[c].dtype + if 
dtype in (np.float32, np.float64): + if dtype == np.float32: + replacement = self.MISSING_VALUES['f'] + else: + replacement = self.MISSING_VALUES['d'] + data[c] = data[c].fillna(replacement) + + return data + def _check_column_names(self, data): """Checks column names to ensure that they are valid Stata column names. This includes checks for: @@ -1197,6 +1208,8 @@ def __iter__(self): data = _cast_to_stata_types(data) # Ensure column names are strings data = self._check_column_names(data) + # Replace NaNs with Stata missing values + data = self._replace_nans(data) self.datarows = DataFrameRowIter(data) self.nobs, self.nvar = data.shape self.data = data @@ -1340,8 +1353,6 @@ def _write_data_dates(self): var = _pad_bytes(var, typ) self._write(var) else: - if isnull(var): # this only matters for floats - var = MISSING_VALUES[TYPE_MAP[typ]] self._file.write(struct.pack(byteorder+TYPE_MAP[typ], var)) def _null_terminate(self, s, as_string=False): diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index fe79bf20615bb..c5debed6654af 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -13,7 +13,8 @@ import pandas as pd from pandas.core.frame import DataFrame, Series from pandas.io.parsers import read_csv -from pandas.io.stata import read_stata, StataReader, InvalidColumnName +from pandas.io.stata import (read_stata, StataReader, InvalidColumnName, + PossiblePrecisionLoss) import pandas.util.testing as tm from pandas.util.misc import is_little_endian from pandas import compat @@ -142,8 +143,7 @@ def test_read_dta2(self): parsed_117 = self.read_dta(self.dta2_117) # 113 is buggy due ot limits date format support in Stata # parsed_113 = self.read_dta(self.dta2_113) - - np.testing.assert_equal( + tm.assert_equal( len(w), 1) # should get a warning for that format. 
# buggy test because of the NaT comparison on certain platforms @@ -206,7 +206,7 @@ def test_read_write_dta5(self): original.index.name = 'index' with tm.ensure_clean() as path: - original.to_stata(path, None, False) + original.to_stata(path, None) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), original) @@ -221,7 +221,7 @@ def test_write_dta6(self): original['quarter'] = original['quarter'].astype(np.int32) with tm.ensure_clean() as path: - original.to_stata(path, None, False) + original.to_stata(path, None) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), original) @@ -257,7 +257,7 @@ def test_read_write_dta10(self): original['integer'] = original['integer'].astype(np.int32) with tm.ensure_clean() as path: - original.to_stata(path, {'datetime': 'tc'}, False) + original.to_stata(path, {'datetime': 'tc'}) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), original) @@ -295,9 +295,9 @@ def test_read_write_dta11(self): with tm.ensure_clean() as path: with warnings.catch_warnings(record=True) as w: - original.to_stata(path, None, False) - np.testing.assert_equal( - len(w), 1) # should get a warning for that format. + original.to_stata(path, None) + # should get a warning for that format. + tm.assert_equal(len(w), 1) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted) @@ -324,13 +324,12 @@ def test_read_write_dta12(self): with tm.ensure_clean() as path: with warnings.catch_warnings(record=True) as w: - original.to_stata(path, None, False) - np.testing.assert_equal( - len(w), 1) # should get a warning for that format. + original.to_stata(path, None) + tm.assert_equal(len(w), 1) # should get a warning for that format. 
written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted) - + def test_read_write_dta13(self): s1 = Series(2**9, dtype=np.int16) s2 = Series(2**17, dtype=np.int32) @@ -366,7 +365,7 @@ def test_read_write_reread_dta14(self): tm.assert_frame_equal(parsed_114, parsed_115) with tm.ensure_clean() as path: - parsed_114.to_stata(path, {'date_td': 'td'}, write_index=False) + parsed_114.to_stata(path, {'date_td': 'td'}) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114) @@ -406,7 +405,7 @@ def test_numeric_column_names(self): with warnings.catch_warnings(record=True) as w: tm.assert_produces_warning(original.to_stata(path), InvalidColumnName) # should produce a single warning - np.testing.assert_equal(len(w), 1) + tm.assert_equal(len(w), 1) written_and_read_again = self.read_dta(path) written_and_read_again = written_and_read_again.set_index('index') @@ -415,7 +414,102 @@ def test_numeric_column_names(self): written_and_read_again.columns = map(convert_col_name, columns) tm.assert_frame_equal(original, written_and_read_again) + def test_nan_to_missing_value(self): + s1 = Series(np.arange(4.0), dtype=np.float32) + s2 = Series(np.arange(4.0), dtype=np.float64) + s1[::2] = np.nan + s2[1::2] = np.nan + original = DataFrame({'s1': s1, 's2': s2}) + original.index.name = 'index' + with tm.ensure_clean() as path: + original.to_stata(path) + written_and_read_again = self.read_dta(path) + written_and_read_again = written_and_read_again.set_index('index') + tm.assert_frame_equal(written_and_read_again, original) + + def test_no_index(self): + columns = ['x', 'y'] + original = DataFrame(np.reshape(np.arange(10.0), (5, 2)), + columns=columns) + original.index.name = 'index_not_written' + with tm.ensure_clean() as path: + original.to_stata(path, write_index=False) + written_and_read_again = self.read_dta(path) + tm.assertRaises(KeyError, + lambda: 
written_and_read_again['index_not_written']) + + def test_string_no_dates(self): + s1 = Series(['a', 'A longer string']) + s2 = Series([1.0, 2.0], dtype=np.float64) + original = DataFrame({'s1': s1, 's2': s2}) + original.index.name = 'index' + with tm.ensure_clean() as path: + original.to_stata(path) + written_and_read_again = self.read_dta(path) + tm.assert_frame_equal(written_and_read_again.set_index('index'), + original) + + def test_large_value_conversion(self): + s0 = Series([1, 99], dtype=np.int8) + s1 = Series([1, 127], dtype=np.int8) + s2 = Series([1, 2 ** 15 - 1], dtype=np.int16) + s3 = Series([1, 2 ** 63 - 1], dtype=np.int64) + original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3}) + original.index.name = 'index' + with tm.ensure_clean() as path: + with warnings.catch_warnings(record=True) as w: + tm.assert_produces_warning(original.to_stata(path), + PossiblePrecisionLoss) + # should produce a single warning + tm.assert_equal(len(w), 1) + + written_and_read_again = self.read_dta(path) + modified = original.copy() + modified['s1'] = Series(modified['s1'], dtype=np.int16) + modified['s2'] = Series(modified['s2'], dtype=np.int32) + modified['s3'] = Series(modified['s3'], dtype=np.float64) + tm.assert_frame_equal(written_and_read_again.set_index('index'), + modified) + + def test_dates_invalid_column(self): + original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)]) + original.index.name = 'index' + with tm.ensure_clean() as path: + with warnings.catch_warnings(record=True) as w: + tm.assert_produces_warning(original.to_stata(path, {0: 'tc'}), + InvalidColumnName) + tm.assert_equal(len(w), 1) + + written_and_read_again = self.read_dta(path) + modified = original.copy() + modified.columns = ['_0'] + tm.assert_frame_equal(written_and_read_again.set_index('index'), + modified) + + def test_date_export_formats(self): + columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty'] + conversions = dict(((c, c) for c in columns)) + data = [datetime(2006, 11, 20, 23, 
13, 20)] * len(columns) + original = DataFrame([data], columns=columns) + original.index.name = 'index' + expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time + datetime(2006, 11, 20), # Day + datetime(2006, 11, 19), # Week + datetime(2006, 11, 1), # Month + datetime(2006, 10, 1), # Quarter year + datetime(2006, 7, 1), # Half year + datetime(2006, 1, 1)] # Year + + expected = DataFrame([expected_values], columns=columns) + expected.index.name = 'index' + with tm.ensure_clean() as path: + original.to_stata(path, conversions) + written_and_read_again = self.read_dta(path) + tm.assert_frame_equal(written_and_read_again.set_index('index'), + expected) + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) +
closes #6684 Stata does not correctly handle NaNs, and so these must be replaced with Stata missing values (. by default). The fix checks floating point columns for nan and replaces these with the Stata numeric code for (.). The write_index option was also being ignored by omission. This has been fixed and numerous tests which were not correct have been fixed.
https://api.github.com/repos/pandas-dev/pandas/pulls/6685
2014-03-21T22:22:24Z
2014-03-23T13:41:24Z
2014-03-23T13:41:24Z
2014-06-13T03:14:11Z
BUG: Bug in binary operations with a rhs of a Series not aligning (GH6681)
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 159cd05194300..fe3fc42992468 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -140,10 +140,9 @@ either match on the *index* or *columns* via the **axis** keyword: .. ipython:: python - d = {'one' : Series(randn(3), index=['a', 'b', 'c']), - 'two' : Series(randn(4), index=['a', 'b', 'c', 'd']), - 'three' : Series(randn(3), index=['b', 'c', 'd'])} - df = df_orig = DataFrame(d) + df = DataFrame({'one' : Series(randn(3), index=['a', 'b', 'c']), + 'two' : Series(randn(4), index=['a', 'b', 'c', 'd']), + 'three' : Series(randn(3), index=['b', 'c', 'd'])}) df row = df.ix[1] column = df['two'] @@ -154,6 +153,20 @@ either match on the *index* or *columns* via the **axis** keyword: df.sub(column, axis='index') df.sub(column, axis=0) +.. ipython:: python + :suppress: + + df_orig = df + +Furthermore you can align a level of a multi-indexed DataFrame with a Series. + +.. ipython:: python + + dfmi = df.copy() + dfmi.index = MultiIndex.from_tuples([(1,'a'),(1,'b'),(1,'c'),(2,'a')], + names=['first','second']) + dfmi.sub(column, axis=0, level='second') + With Panel, describing the matching behavior is a bit more difficult, so the arithmetic methods instead (and perhaps confusingly?) give you the option to specify the *broadcast axis*. For example, suppose we wished to demean the diff --git a/doc/source/io.rst b/doc/source/io.rst index f43582ded4473..4d97c43e85de8 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1843,11 +1843,11 @@ the sheet names using the ``sheet_names`` attribute. .. versionadded:: 0.13 There are now two ways to read in sheets from an Excel file. You can provide -either the index of a sheet or its name to by passing different values for -``sheet_name``. +either the index of a sheet or its name to by passing different values for +``sheet_name``. - Pass a string to refer to the name of a particular sheet in the workbook. -- Pass an integer to refer to the index of a sheet. 
Indices follow Python +- Pass an integer to refer to the index of a sheet. Indices follow Python convention, beginning at 0. - The default value is ``sheet_name=0``. This reads the first sheet. diff --git a/doc/source/release.rst b/doc/source/release.rst index fe57133752ce3..3937b4b30fa0e 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -268,6 +268,7 @@ Bug Fixes - Bug in ``fillna`` with ``limit`` and ``value`` specified - Bug in ``DataFrame.to_stata`` when columns have non-string names (:issue:`4558`) - Bug in compat with ``np.compress``, surfaced in (:issue:`6658`) +- Bug in binary operations with a rhs of a Series not aligning (:issue:`6681`) pandas 0.13.1 ------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index df43dae257408..253b9ac2c7a16 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2801,12 +2801,12 @@ def _combine_series(self, other, func, fill_value=None, axis=None, if axis is not None: axis = self._get_axis_name(axis) if axis == 'index': - return self._combine_match_index(other, func, fill_value) + return self._combine_match_index(other, func, level=level, fill_value=fill_value) else: - return self._combine_match_columns(other, func, fill_value) - return self._combine_series_infer(other, func, fill_value) + return self._combine_match_columns(other, func, level=level, fill_value=fill_value) + return self._combine_series_infer(other, func, level=level, fill_value=fill_value) - def _combine_series_infer(self, other, func, fill_value=None): + def _combine_series_infer(self, other, func, level=None, fill_value=None): if len(other) == 0: return self * NA @@ -2822,12 +2822,12 @@ def _combine_series_infer(self, other, func, fill_value=None): "DataFrame.<op> to explicitly broadcast arithmetic " "operations along the index"), FutureWarning) - return self._combine_match_index(other, func, fill_value) + return self._combine_match_index(other, func, level=level, fill_value=fill_value) else: - return 
self._combine_match_columns(other, func, fill_value) + return self._combine_match_columns(other, func, level=level, fill_value=fill_value) - def _combine_match_index(self, other, func, fill_value=None): - left, right = self.align(other, join='outer', axis=0, copy=False) + def _combine_match_index(self, other, func, level=None, fill_value=None): + left, right = self.align(other, join='outer', axis=0, level=level, copy=False) if fill_value is not None: raise NotImplementedError("fill_value %r not supported." % fill_value) @@ -2835,8 +2835,8 @@ def _combine_match_index(self, other, func, fill_value=None): index=left.index, columns=self.columns, copy=False) - def _combine_match_columns(self, other, func, fill_value=None): - left, right = self.align(other, join='outer', axis=1, copy=False) + def _combine_match_columns(self, other, func, level=None, fill_value=None): + left, right = self.align(other, join='outer', axis=1, level=level, copy=False) if fill_value is not None: raise NotImplementedError("fill_value %r not supported" % fill_value) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ba6e7a33a7515..6c57a9ce5beaa 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2996,23 +2996,30 @@ def _align_series(self, other, join='outer', axis=None, level=None, else: + # for join compat if we have an unnamed index, but + # are specifying a level join + other_index = other.index + if level is not None and other.index.name is None: + other_index = other_index.set_names([level]) + # one has > 1 ndim fdata = self._data if axis == 0: join_index = self.index lidx, ridx = None, None - if not self.index.equals(other.index): + if not self.index.equals(other_index): join_index, lidx, ridx = self.index.join( - other.index, how=join, return_indexers=True) + other_index, how=join, return_indexers=True) if lidx is not None: fdata = fdata.reindex_indexer(join_index, lidx, axis=1) + elif axis == 1: join_index = self.columns lidx, ridx = None, None - if not 
self.columns.equals(other.index): + if not self.columns.equals(other_index): join_index, lidx, ridx = \ - self.columns.join(other.index, how=join, + self.columns.join(other_index, how=join, return_indexers=True) if lidx is not None: @@ -3024,7 +3031,11 @@ def _align_series(self, other, join='outer', axis=None, level=None, fdata = fdata.copy() left_result = DataFrame(fdata) - right_result = other if ridx is None else other.reindex(join_index) + + if ridx is None: + right_result = other + else: + right_result = other.reindex(join_index, level=level) # fill fill_na = notnull(fill_value) or (method is not None) diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index a69c07494af8a..bd34c7e5f02b2 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -456,11 +456,13 @@ def _combine_frame(self, other, func, fill_value=None, level=None): default_fill_value=new_fill_value, fill_value=new_fill_value).__finalize__(self) - def _combine_match_index(self, other, func, fill_value=None): + def _combine_match_index(self, other, func, level=None, fill_value=None): new_data = {} if fill_value is not None: raise NotImplementedError + if level is not None: + raise NotImplementedError new_index = self.index.union(other.index) this = self @@ -486,7 +488,7 @@ def _combine_match_index(self, other, func, fill_value=None): default_fill_value=fill_value, fill_value=self.default_fill_value).__finalize__(self) - def _combine_match_columns(self, other, func, fill_value): + def _combine_match_columns(self, other, func, level=None, fill_value=None): # patched version of DataFrame._combine_match_columns to account for # NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series, # where 3.0 is numpy.float64 and series is a SparseSeries. 
Still @@ -494,6 +496,8 @@ def _combine_match_columns(self, other, func, fill_value): if fill_value is not None: raise NotImplementedError + if level is not None: + raise NotImplementedError new_data = {} diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index f61880f97c7a5..32805d47821f4 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -4941,6 +4941,38 @@ def test_arith_flex_frame(self): with assertRaisesRegexp(NotImplementedError, 'fill_value'): self.frame.add(self.frame.irow(0), axis='index', fill_value=3) + def test_binary_ops_align(self): + + # test aligning binary ops + + # GH 6681 + index=MultiIndex.from_product([list('abc'), + ['one','two','three'], + [1,2,3]], + names=['first','second','third']) + + df = DataFrame(np.arange(27*3).reshape(27,3), + index=index, + columns=['value1','value2','value3']).sortlevel() + + idx = pd.IndexSlice + for op in ['add','sub','mul','div','truediv']: + opa = getattr(operator,op,None) + if opa is None: + continue + + x = Series([ 1.0, 10.0, 100.0], [1,2,3]) + result = getattr(df,op)(x,level='third',axis=0) + + expected = pd.concat([ opa(df.loc[idx[:,:,i],:],v) for i, v in x.iteritems() ]).sortlevel() + assert_frame_equal(result, expected) + + x = Series([ 1.0, 10.0], ['two','three']) + result = getattr(df,op)(x,level='second',axis=0) + + expected = pd.concat([ opa(df.loc[idx[:,i],:],v) for i, v in x.iteritems() ]).reindex_like(df).sortlevel() + assert_frame_equal(result, expected) + def test_arith_mixed(self): left = DataFrame({'A': ['a', 'b', 'c'],
closes #6681 ``` In [11]: index=MultiIndex.from_product([list('abc'), ['one','two','three'], [1,2,3]], names=['first','second','third']) In [12]: df = DataFrame(np.arange(27*3).reshape(27,3), index=index, columns=['value1','value2','value3']).sortlevel() In [13]: df Out[13]: value1 value2 value3 first second third a one 1 0 1 2 2 3 4 5 3 6 7 8 three 1 18 19 20 2 21 22 23 3 24 25 26 two 1 9 10 11 2 12 13 14 3 15 16 17 b one 1 27 28 29 2 30 31 32 3 33 34 35 three 1 45 46 47 2 48 49 50 3 51 52 53 two 1 36 37 38 2 39 40 41 3 42 43 44 c one 1 54 55 56 2 57 58 59 3 60 61 62 three 1 72 73 74 2 75 76 77 3 78 79 80 two 1 63 64 65 2 66 67 68 3 69 70 71 [27 rows x 3 columns] In [15]: x = Series([ 1.0, 10.0], ['two','three']) In [16]: x Out[16]: two 1 three 10 dtype: float64 ``` Passing a level now aligns by that level ``` In [14]: df.mul(x,level='second',axis=0) Out[14]: value1 value2 value3 first second third a one 1 NaN NaN NaN 2 NaN NaN NaN 3 NaN NaN NaN three 1 180 190 200 2 210 220 230 3 240 250 260 two 1 9 10 11 2 12 13 14 3 15 16 17 b one 1 NaN NaN NaN 2 NaN NaN NaN 3 NaN NaN NaN three 1 450 460 470 2 480 490 500 3 510 520 530 two 1 36 37 38 2 39 40 41 3 42 43 44 c one 1 NaN NaN NaN 2 NaN NaN NaN 3 NaN NaN NaN three 1 720 730 740 2 750 760 770 3 780 790 800 two 1 63 64 65 2 66 67 68 3 69 70 71 [27 rows x 3 columns] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6682
2014-03-21T17:30:43Z
2014-03-21T20:10:13Z
2014-03-21T20:10:13Z
2014-06-23T20:21:08Z
BUG: legend behaves inconsistently when plotting to the same axes
diff --git a/doc/source/release.rst b/doc/source/release.rst index 03b89f9077994..a632d69eef734 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -384,6 +384,7 @@ Bug Fixes group match wasn't renamed to the group name - Bug in ``DataFrame.to_csv`` where setting `index` to `False` ignored the `header` kwarg (:issue:`6186`) +- Bug in `DataFrame.plot` and `Series.plot` legend behave inconsistently when plotting to the same axes repeatedly (:issue:`6678`) pandas 0.13.1 ------------- diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index f42d2b3b52f55..15d05ff046bb1 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -66,7 +66,7 @@ for controlling the look of the plot: .. ipython:: python @savefig series_plot_basic2.png - plt.figure(); ts.plot(style='k--', label='Series'); plt.legend() + plt.figure(); ts.plot(style='k--', label='Series'); On DataFrame, ``plot`` is a convenience to plot all of the columns with labels: @@ -76,7 +76,7 @@ On DataFrame, ``plot`` is a convenience to plot all of the columns with labels: df = df.cumsum() @savefig frame_plot_basic.png - plt.figure(); df.plot(); plt.legend(loc='best') + plt.figure(); df.plot(); You may set the ``legend`` argument to ``False`` to hide the legend, which is shown by default. @@ -91,7 +91,7 @@ Some other options are available, like plotting each Series on a different axis: .. ipython:: python @savefig frame_plot_subplots.png - df.plot(subplots=True, figsize=(6, 6)); plt.legend(loc='best') + df.plot(subplots=True, figsize=(6, 6)); You may pass ``logy`` to get a log-scale Y axis. 
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 5beb5a05a800d..fceec8cf00e92 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -490,29 +490,34 @@ def test_subplots(self): df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10])) - axes = df.plot(subplots=True, sharex=True, legend=True) + for kind in ['bar', 'barh', 'line']: + axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True) - for ax in axes: - self.assertIsNotNone(ax.get_legend()) - - axes = df.plot(subplots=True, sharex=True) - for ax in axes[:-2]: - [self.assert_(not label.get_visible()) - for label in ax.get_xticklabels()] - [self.assert_(label.get_visible()) - for label in ax.get_yticklabels()] + for ax, column in zip(axes, df.columns): + self._check_legend_labels(ax, [column]) - [self.assert_(label.get_visible()) - for label in axes[-1].get_xticklabels()] - [self.assert_(label.get_visible()) - for label in axes[-1].get_yticklabels()] + axes = df.plot(kind=kind, subplots=True, sharex=True) + for ax in axes[:-2]: + [self.assert_(not label.get_visible()) + for label in ax.get_xticklabels()] + [self.assert_(label.get_visible()) + for label in ax.get_yticklabels()] - axes = df.plot(subplots=True, sharex=False) - for ax in axes: [self.assert_(label.get_visible()) - for label in ax.get_xticklabels()] + for label in axes[-1].get_xticklabels()] [self.assert_(label.get_visible()) - for label in ax.get_yticklabels()] + for label in axes[-1].get_yticklabels()] + + axes = df.plot(kind=kind, subplots=True, sharex=False) + for ax in axes: + [self.assert_(label.get_visible()) + for label in ax.get_xticklabels()] + [self.assert_(label.get_visible()) + for label in ax.get_yticklabels()] + + axes = df.plot(kind=kind, subplots=True, legend=False) + for ax in axes: + self.assertTrue(ax.get_legend() is None) @slow def test_bar_colors(self): @@ -873,7 +878,7 @@ def test_kde(self): _check_plot_works(df.plot, kind='kde') 
_check_plot_works(df.plot, kind='kde', subplots=True) ax = df.plot(kind='kde') - self.assertIsNotNone(ax.get_legend()) + self._check_legend_labels(ax, df.columns) axes = df.plot(kind='kde', logy=True, subplots=True) for ax in axes: self.assertEqual(ax.get_yscale(), 'log') @@ -1046,6 +1051,64 @@ def test_plot_int_columns(self): df = DataFrame(randn(100, 4)).cumsum() _check_plot_works(df.plot, legend=True) + def _check_legend_labels(self, ax, labels): + import pandas.core.common as com + labels = [com.pprint_thing(l) for l in labels] + self.assertTrue(ax.get_legend() is not None) + legend_labels = [t.get_text() for t in ax.get_legend().get_texts()] + self.assertEqual(labels, legend_labels) + + @slow + def test_df_legend_labels(self): + kinds = 'line', 'bar', 'barh', 'kde', 'density' + df = DataFrame(randn(3, 3), columns=['a', 'b', 'c']) + df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f']) + df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i']) + df4 = DataFrame(randn(3, 3), columns=['j', 'k', 'l']) + + for kind in kinds: + ax = df.plot(kind=kind, legend=True) + self._check_legend_labels(ax, df.columns) + + ax = df2.plot(kind=kind, legend=False, ax=ax) + self._check_legend_labels(ax, df.columns) + + ax = df3.plot(kind=kind, legend=True, ax=ax) + self._check_legend_labels(ax, df.columns + df3.columns) + + ax = df4.plot(kind=kind, legend='reverse', ax=ax) + expected = list(df.columns + df3.columns) + list(reversed(df4.columns)) + self._check_legend_labels(ax, expected) + + # Secondary Y + ax = df.plot(legend=True, secondary_y='b') + self._check_legend_labels(ax, ['a', 'b (right)', 'c']) + ax = df2.plot(legend=False, ax=ax) + self._check_legend_labels(ax, ['a', 'b (right)', 'c']) + ax = df3.plot(kind='bar', legend=True, secondary_y='h', ax=ax) + self._check_legend_labels(ax, ['a', 'b (right)', 'c', 'g', 'h (right)', 'i']) + + # Time Series + ind = date_range('1/1/2014', periods=3) + df = DataFrame(randn(3, 3), columns=['a', 'b', 'c'], index=ind) + df2 = 
DataFrame(randn(3, 3), columns=['d', 'e', 'f'], index=ind) + df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i'], index=ind) + ax = df.plot(legend=True, secondary_y='b') + self._check_legend_labels(ax, ['a', 'b (right)', 'c']) + ax = df2.plot(legend=False, ax=ax) + self._check_legend_labels(ax, ['a', 'b (right)', 'c']) + ax = df3.plot(legend=True, ax=ax) + self._check_legend_labels(ax, ['a', 'b (right)', 'c', 'g', 'h', 'i']) + + # scatter + ax = df.plot(kind='scatter', x='a', y='b', label='data1') + self._check_legend_labels(ax, ['data1']) + ax = df2.plot(kind='scatter', x='d', y='e', legend=False, + label='data2', ax=ax) + self._check_legend_labels(ax, ['data1']) + ax = df3.plot(kind='scatter', x='g', y='h', label='data3', ax=ax) + self._check_legend_labels(ax, ['data1', 'data3']) + def test_legend_name(self): multi = DataFrame(randn(4, 4), columns=[np.array(['a', 'a', 'b', 'b']), @@ -1056,6 +1119,20 @@ def test_legend_name(self): leg_title = ax.legend_.get_title() self.assertEqual(leg_title.get_text(), 'group,individual') + df = DataFrame(randn(5, 5)) + ax = df.plot(legend=True, ax=ax) + leg_title = ax.legend_.get_title() + self.assertEqual(leg_title.get_text(), 'group,individual') + + df.columns.name = 'new' + ax = df.plot(legend=False, ax=ax) + leg_title = ax.legend_.get_title() + self.assertEqual(leg_title.get_text(), 'group,individual') + + ax = df.plot(legend=True, ax=ax) + leg_title = ax.legend_.get_title() + self.assertEqual(leg_title.get_text(), 'new') + def _check_plot_fails(self, f, *args, **kwargs): with tm.assertRaises(Exception): f(*args, **kwargs) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 42135e2186468..7e67c48572f51 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -784,8 +784,10 @@ class MPLPlot(object): """ _default_rot = 0 - _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog'] - _attr_defaults = {'logy': False, 'logx': False, 'loglog': False} + _pop_attributes = ['label', 'style', 
'logy', 'logx', 'loglog', + 'mark_right'] + _attr_defaults = {'logy': False, 'logx': False, 'loglog': False, + 'mark_right': True} def __init__(self, data, kind=None, by=None, subplots=False, sharex=True, sharey=False, use_index=True, @@ -823,6 +825,8 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True, self.grid = grid self.legend = legend + self.legend_handles = [] + self.legend_labels = [] for attr in self._pop_attributes: value = kwds.pop(attr, self._attr_defaults.get(attr, None)) @@ -919,6 +923,7 @@ def generate(self): self._setup_subplots() self._make_plot() self._add_table() + self._make_legend() self._post_plot_logic() self._adorn_subplots() @@ -1077,6 +1082,57 @@ def legend_title(self): else: return None + def _add_legend_handle(self, handle, label, index=None): + if not label is None: + if self.mark_right and index is not None: + if self.on_right(index): + label = label + ' (right)' + self.legend_handles.append(handle) + self.legend_labels.append(label) + + def _make_legend(self): + ax, leg = self._get_ax_legend(self.axes[0]) + + handles = [] + labels = [] + title = '' + + if not self.subplots: + if not leg is None: + title = leg.get_title().get_text() + handles = leg.legendHandles + labels = [x.get_text() for x in leg.get_texts()] + + if self.legend: + if self.legend == 'reverse': + self.legend_handles = reversed(self.legend_handles) + self.legend_labels = reversed(self.legend_labels) + + handles += self.legend_handles + labels += self.legend_labels + if not self.legend_title is None: + title = self.legend_title + + if len(handles) > 0: + ax.legend(handles, labels, loc='best', title=title) + + elif self.subplots and self.legend: + for ax in self.axes: + ax.legend(loc='best') + + + def _get_ax_legend(self, ax): + leg = ax.get_legend() + other_ax = (getattr(ax, 'right_ax', None) or + getattr(ax, 'left_ax', None)) + other_leg = None + if other_ax is not None: + other_leg = other_ax.get_legend() + if leg is None and other_leg is not 
None: + leg = other_leg + ax = other_ax + return ax, leg + @cache_readonly def plt(self): import matplotlib.pyplot as plt @@ -1205,12 +1261,6 @@ def _maybe_add_color(self, colors, kwds, style, i): if has_color and (style is None or re.match('[a-z]+', style) is None): kwds['color'] = colors[i % len(colors)] - def _get_marked_label(self, label, col_num): - if self.on_right(col_num): - return label + ' (right)' - else: - return label - def _parse_errorbars(self, error_dim='y', **kwds): ''' Look for error keyword arguments and return the actual errorbar data @@ -1330,22 +1380,9 @@ def _make_plot(self): else: args = (ax, ind, y, style) - plotf(*args, **kwds) - ax.grid(self.grid) + newlines = plotf(*args, **kwds) + self._add_legend_handle(newlines[0], label) - def _post_plot_logic(self): - if self.legend: - for ax in self.axes: - ax.legend(loc='best') - leg = self.axes[0].get_legend() - if leg is not None: - lines = leg.get_lines() - labels = [x.get_text() for x in leg.get_texts()] - - if self.legend == 'reverse': - lines = reversed(lines) - labels = reversed(labels) - ax.legend(lines, labels, loc='best', title=self.legend_title) class ScatterPlot(MPLPlot): def __init__(self, data, x, y, **kwargs): @@ -1364,7 +1401,15 @@ def __init__(self, data, x, y, **kwargs): def _make_plot(self): x, y, data = self.x, self.y, self.data ax = self.axes[0] - ax.scatter(data[x].values, data[y].values, **self.kwds) + + if self.legend and hasattr(self, 'label'): + label = self.label + else: + label = None + scatter = ax.scatter(data[x].values, data[y].values, label=label, + **self.kwds) + + self._add_legend_handle(scatter, label) def _post_plot_logic(self): ax = self.axes[0] @@ -1422,7 +1467,6 @@ def _post_plot_logic(self): class LinePlot(MPLPlot): def __init__(self, data, **kwargs): - self.mark_right = kwargs.pop('mark_right', True) MPLPlot.__init__(self, data, **kwargs) self.x_compat = plot_params['x_compat'] if 'x_compat' in self.kwds: @@ -1483,7 +1527,6 @@ def _make_plot(self): else: 
from pandas.core.frame import DataFrame lines = [] - labels = [] x = self._get_xticks(convert_period=True) plotf = self._get_plot_function() @@ -1519,22 +1562,16 @@ def _make_plot(self): else: args = (ax, x, y) - newline = plotf(*args, **kwds)[0] - lines.append(newline) + newlines = plotf(*args, **kwds) - if self.mark_right: - labels.append(self._get_marked_label(label, i)) - else: - labels.append(label) + self._add_legend_handle(newlines[0], label, index=i) - ax.grid(self.grid) + lines.append(newlines[0]) if self._is_datetype(): left, right = _get_xlim(lines) ax.set_xlim(left, right) - self._make_legend(lines, labels) - def _make_ts_plot(self, data, **kwargs): from pandas.tseries.plotting import tsplot from pandas.core.frame import DataFrame @@ -1543,8 +1580,6 @@ def _make_ts_plot(self, data, **kwargs): colors = self._get_colors() plotf = self._get_plot_function() - lines = [] - labels = [] def _plot(data, col_num, ax, label, style, **kwds): @@ -1556,13 +1591,7 @@ def _plot(data, col_num, ax, label, style, **kwds): newlines = tsplot(data, plotf, ax=ax, label=label, **kwds) - ax.grid(self.grid) - lines.append(newlines[0]) - - if self.mark_right: - labels.append(self._get_marked_label(label, col_num)) - else: - labels.append(label) + self._add_legend_handle(newlines[0], label, index=col_num) if isinstance(data, Series): ax = self._get_ax(0) # self.axes[0] @@ -1597,37 +1626,6 @@ def _plot(data, col_num, ax, label, style, **kwds): _plot(data[col], i, ax, label, style, **kwds) - self._make_legend(lines, labels) - - def _make_legend(self, lines, labels): - ax, leg = self._get_ax_legend(self.axes[0]) - - if not self.subplots: - if leg is not None: - ext_lines = leg.get_lines() - ext_labels = [x.get_text() for x in leg.get_texts()] - ext_lines.extend(lines) - ext_labels.extend(labels) - ax.legend(ext_lines, ext_labels, loc='best', - title=self.legend_title) - elif self.legend: - if self.legend == 'reverse': - lines = reversed(lines) - labels = reversed(labels) - 
ax.legend(lines, labels, loc='best', title=self.legend_title) - - def _get_ax_legend(self, ax): - leg = ax.get_legend() - other_ax = (getattr(ax, 'right_ax', None) or - getattr(ax, 'left_ax', None)) - other_leg = None - if other_ax is not None: - other_leg = other_ax.get_legend() - if leg is None and other_leg is not None: - leg = other_leg - ax = other_ax - return ax, leg - def _maybe_convert_index(self, data): # tsplot converts automatically, but don't want to convert index # over and over for DataFrames @@ -1679,16 +1677,12 @@ def _post_plot_logic(self): if index_name is not None: ax.set_xlabel(index_name) - if self.subplots and self.legend: - for ax in self.axes: - ax.legend(loc='best') class BarPlot(MPLPlot): _default_rot = {'bar': 90, 'barh': 0} def __init__(self, data, **kwargs): - self.mark_right = kwargs.pop('mark_right', True) self.stacked = kwargs.pop('stacked', False) self.bar_width = kwargs.pop('width', 0.5) @@ -1739,8 +1733,6 @@ def _make_plot(self): colors = self._get_colors() ncolors = len(colors) - rects = [] - labels = [] bar_f = self.bar_f @@ -1778,8 +1770,8 @@ def _make_plot(self): if self.subplots: w = self.bar_width / 2 - rect = bar_f(ax, self.ax_pos + w, y, self.bar_width, - start=start, **kwds) + rect = bar_f(ax, self.ax_pos + w, y, self.bar_width, + start=start, label=label, **kwds) ax.set_title(label) elif self.stacked: mask = y > 0 @@ -1793,19 +1785,8 @@ def _make_plot(self): w = self.bar_width / K rect = bar_f(ax, self.ax_pos + (i + 1.5) * w, y, w, start=start, label=label, **kwds) - rects.append(rect) - if self.mark_right: - labels.append(self._get_marked_label(label, i)) - else: - labels.append(label) - - if self.legend and not self.subplots: - patches = [r[0] for r in rects] - if self.legend == 'reverse': - patches = reversed(patches) - labels = reversed(labels) - self.axes[0].legend(patches, labels, loc='best', - title=self.legend_title) + + self._add_legend_handle(rect, label, index=i) def _post_plot_logic(self): for ax in self.axes:
There seems to be some inconsistencies related to `DataFrame.plot` and `Series.plot` legend behaviors. _Problems:_ - When `DataFrame.plot` or `Series.plot` plots data on the same axes repeatedly: - If the target axes already has a legend, line plot always appends its legend to existing one ignoring `legend` kw and existing legend will be drawn as line artist regardless of actual artist type. Also, legend cannot be `reverse`ed if the axes already has a legend. - Bar/BarH plot deletes the existing legend and overwrites with latest one. - KDE plot appends its legend to the existing one, and will apply `reverse` to all artists including the existing one. - When `subplots` is enabled, line plot draws legends on each axes but bar plot doesn't. - Scatter plot does not draw legend even if `label` keyword is passed. _Fix:_ I've prepared a fix based on following concept, except `hexbin` which doesn't use legend. - Legend should be drawn by the order of the artists drawn. - Each legend should be drawn according to the passed `legend` value. - If df1 plots with `legend=True` and df2 with `legend=False`, only df1's legend should appear. - If df2 plots with `legend='reverse'`d, only df2's legend should be reversed. - When `subplots=True` and `legend=True`, each subplot axes should have its own legend (standardize current line plot behavior). 
_Example Code_ ``` df1 = DataFrame(randn(6, 3), index=range(6), columns=['a', 'b', 'c']) df2 = DataFrame(randn(6, 3), index=range(6), columns=['d', 'e', 'f']) df3 = DataFrame(randn(6, 3), index=range(6), columns=['x', 'y', 'z']) fig, axes = plt.subplots(1, 5, figsize=(14, 3)) for i, (legend, df) in enumerate(zip([True, False, 'reverse'], [df1, df2, df3])): df.plot(ax=axes[0], legend=legend, title='line') df.plot(kind='bar', ax=axes[1], legend=legend, title='bar') df.plot(kind='barh', ax=axes[2], legend=legend, title='barh') df.plot(kind='kde', ax=axes[3], legend=legend, title='kde') df.plot(kind='scatter', ax=axes[4], x=df.columns[0], y=df.columns[1], label=i, legend=legend, title='scatter') plt.show() ``` _Output using current repository_ - For line, bar, kde plot, expected legend is `a, b, c, z, y x`. Because df2 was plot by `legend=False`, and df3 was plot by `legend='reverse'`. - For scatter plot, `0, 2` is expected because df2 was plot by `legend=False`. ![figure_legend_current](https://f.cloud.github.com/assets/1696302/2480801/ba3c694a-b0c9-11e3-8f5b-d2ebec0798e3.png) _Output after fix_ ![figure_fixed](https://f.cloud.github.com/assets/1696302/2480773/b1bd8f70-b0c8-11e3-9654-3e3c2b20157b.png) If there is anything should be considered, please let me know. Thanks.
https://api.github.com/repos/pandas-dev/pandas/pulls/6678
2014-03-21T07:30:24Z
2014-04-22T13:10:31Z
2014-04-22T13:10:31Z
2014-06-12T19:37:46Z
ENH: define the order of resolution for index vs columns in query/eval
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index bca009c6b8931..c1b8044ea305b 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -784,6 +784,29 @@ If instead you don't want to or cannot name your index, you can use the name del old_index +.. note:: + + If the name of your index overlaps with a column name, the column name is + given precedence. For example, + + .. ipython:: python + + df = DataFrame({'a': randint(5, size=5)}) + df.index.name = 'a' + df.query('a > 2') # uses the column 'a', not the index + + You can still use the index in a query expression by using the special + identifier 'index': + + .. ipython:: python + + df.query('index > 2') + + If for some reason you have a column named ``index``, then you can refer to + the index as ``ilevel_0`` as well, but at this point you should consider + renaming your columns to something less ambiguous. + + :class:`~pandas.MultiIndex` :meth:`~pandas.DataFrame.query` Syntax ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/release.rst b/doc/source/release.rst index c0bd8f424c2b2..fe57133752ce3 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -143,6 +143,8 @@ API Changes - Following keywords are now acceptable for :meth:`DataFrame.plot(kind='bar')` and :meth:`DataFrame.plot(kind='barh')`. - `width`: Specify the bar width. In previous versions, static value 0.5 was passed to matplotlib and it cannot be overwritten. - `position`: Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1(right/top-end). Default is 0.5 (center). 
(:issue:`6604`) + - Define and document the order of column vs index names in query/eval + (:issue:`6676`) Deprecations ~~~~~~~~~~~~ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4f23fd69d7621..df43dae257408 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1855,7 +1855,7 @@ def eval(self, expr, **kwargs): kwargs['level'] = kwargs.pop('level', 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() - resolvers = index_resolvers, dict(self.iteritems()) + resolvers = dict(self.iteritems()), index_resolvers kwargs['target'] = self kwargs['resolvers'] = kwargs.get('resolvers', ()) + resolvers return _eval(expr, **kwargs) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 4391de0ebfe58..f61880f97c7a5 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -12583,7 +12583,7 @@ def setUpClass(cls): super(TestDataFrameQueryNumExprPandas, cls).setUpClass() cls.engine = 'numexpr' cls.parser = 'pandas' - tm.skip_if_no_ne() + tm.skip_if_no_ne(cls.engine) @classmethod def tearDownClass(cls): @@ -12867,6 +12867,31 @@ def test_query_undefined_local(self): "local variable 'c' is not defined"): df.query('a == @c', engine=engine, parser=parser) + def test_index_resolvers_come_after_columns_with_the_same_name(self): + n = 1 + a = np.r_[20:101:20] + + df = DataFrame({'index': a, 'b': np.random.randn(a.size)}) + df.index.name = 'index' + result = df.query('index > 5', engine=self.engine, parser=self.parser) + expected = df[df['index'] > 5] + tm.assert_frame_equal(result, expected) + + df = DataFrame({'index': a, 'b': np.random.randn(a.size)}) + result = df.query('ilevel_0 > 5', engine=self.engine, parser=self.parser) + expected = df.loc[df.index[df.index > 5]] + tm.assert_frame_equal(result, expected) + + df = DataFrame({'a': a, 'b': np.random.randn(a.size)}) + df.index.name = 'a' + result = df.query('a > 5', engine=self.engine, parser=self.parser) + expected = df[df.a > 5] + 
tm.assert_frame_equal(result, expected) + + result = df.query('index > 5', engine=self.engine, parser=self.parser) + expected = df.loc[df.index[df.index > 5]] + tm.assert_frame_equal(result, expected) + class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
closes #6676
https://api.github.com/repos/pandas-dev/pandas/pulls/6677
2014-03-21T00:09:32Z
2014-03-21T12:46:08Z
2014-03-21T12:46:08Z
2014-06-24T20:05:14Z
PERF: Quick Shift Implementation (GH5609)
diff --git a/doc/source/release.rst b/doc/source/release.rst index f09969cda60f1..c0bd8f424c2b2 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -182,7 +182,8 @@ Improvements to existing features - Support passing ``encoding`` with xlwt (:issue:`3710`) - Performance improvement when converting ``DatetimeIndex`` to floating ordinals using ``DatetimeConverter`` (:issue:`6636`) - +- Performance improvement for ``DataFrame.shift`` (:issue: `5609`) + .. _release.bug_fixes-0.14.0: Bug Fixes diff --git a/pandas/core/common.py b/pandas/core/common.py index 46ca371284ae4..dadd21f8fc128 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -2003,18 +2003,6 @@ def intersection(*seqs): return type(seqs[0])(list(result)) -def _shift_indexer(N, periods): - # small reusable utility - indexer = np.zeros(N, dtype=int) - - if periods > 0: - indexer[periods:] = np.arange(N - periods) - else: - indexer[:periods] = np.arange(-periods, N) - - return indexer - - def _asarray_tuplesafe(values, dtype=None): from pandas.core.index import Index diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4b28e6a09184a..ba6e7a33a7515 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3222,9 +3222,7 @@ def shift(self, periods=1, freq=None, axis=0, **kwds): return self if freq is None and not len(kwds): - block_axis = self._get_block_manager_axis(axis) - indexer = com._shift_indexer(len(self._get_axis(axis)), periods) - new_data = self._data.shift(indexer=indexer, periods=periods, axis=block_axis) + new_data = self._data.shift(periods=periods, axis=axis) else: return self.tshift(periods, freq, **kwds) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index fc7b4bc23ac09..fe5ae48fea281 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -961,23 +961,20 @@ def diff(self, n): return [make_block(new_values, self.items, self.ref_items, ndim=self.ndim, fastpath=True)] - def shift(self, indexer, periods, axis=0): 
+ def shift(self, periods, axis=0): """ shift the block by periods, possibly upcast """ - - new_values = self.values.take(indexer, axis=axis) # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also - new_values, fill_value = com._maybe_upcast(new_values) - + new_values, fill_value = com._maybe_upcast(self.values) + new_values = np.roll(new_values.T,periods,axis=axis) axis_indexer = [ slice(None) ] * self.ndim if periods > 0: axis_indexer[axis] = slice(None,periods) else: - axis_indexer = [ slice(None) ] * self.ndim axis_indexer[axis] = slice(periods,None) new_values[tuple(axis_indexer)] = fill_value - return [make_block(new_values, self.items, self.ref_items, + return [make_block(new_values.T, self.items, self.ref_items, ndim=self.ndim, fastpath=True)] def eval(self, func, other, raise_on_error=True, try_cast=False): @@ -1910,9 +1907,15 @@ def fillna(self, value, limit=None, inplace=False, downcast=None): values = self.values if inplace else self.values.copy() return [self.make_block(values.get_values(value), fill_value=value)] - def shift(self, indexer, periods, axis=0): + + def shift(self, periods, axis=0): """ shift the block by periods """ - + N = len(self.values.T) + indexer = np.zeros(N, dtype=int) + if periods > 0: + indexer[periods:] = np.arange(N - periods) + else: + indexer[:periods] = np.arange(-periods, N) new_values = self.values.to_dense().take(indexer) # convert integer to float if necessary. 
need to do a lot more than # that, handle boolean etc also diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index a70d756c82b0a..7f9063003191f 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -429,3 +429,16 @@ def test_unequal(name): setup, start_date=datetime(2014, 2, 7)) + +#------------------------------------------------------------------------- +# frame shift speedup issue-5609 + +setup = common_setup + """ +df = pd.DataFrame(np.random.rand(10000,500)) +""" +frame_shift_axis0 = Benchmark('df.shift(1,axis=0)', setup, + name = 'frame_shift_axis_0', + start_date=datetime(2014,1,1)) +frame_shift_axis1 = Benchmark('df.shift(1,axis=1)', setup, + name = 'frame_shift_axis_1', + start_date=datetime(2014,1,1)) \ No newline at end of file
closes #5609 ## Test code ``` import pandas as pd import numpy as np import timeit xdim = 10000 ydim = 500 repeats = 10 df = pd.DataFrame(np.random.rand(xdim,ydim)) # axis 0 shift s = timeit.default_timer() for i in range(repeats): df1 = df.shift(1,axis=0) e = timeit.default_timer() print "Axis 0 shift",e-s # axis 1 shift s = timeit.default_timer() for i in range(repeats): df1 = df.shift(1,axis=1) e = timeit.default_timer() print "Axis 1 shift",e-s ``` ## Results ``` V0.13.1 Axis 0 shift 0.958936203491 Axis 1 shift: IndexError: index 500 is out of bounds for size 500 ---------------------------------------------------------------------------------- V0.13.1-iss5609 branch Axis 0 shift 0.1622 Axis 1 shift 0.3466 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6672
2014-03-20T04:05:54Z
2014-03-20T20:04:35Z
2014-03-20T20:04:35Z
2014-08-10T13:36:10Z
BUG: Seed RNG in test_partially_invalid_plot_data
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index f5ee7fc301384..fd0463ccd7ba0 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -1106,12 +1106,13 @@ def test_all_invalid_plot_data(self): @slow def test_partially_invalid_plot_data(self): - kinds = 'line', 'bar', 'barh', 'kde', 'density' - df = DataFrame(randn(10, 2), dtype=object) - df[np.random.rand(df.shape[0]) > 0.5] = 'a' - for kind in kinds: - with tm.assertRaises(TypeError): - df.plot(kind=kind) + with tm.RNGContext(42): + kinds = 'line', 'bar', 'barh', 'kde', 'density' + df = DataFrame(randn(10, 2), dtype=object) + df[np.random.rand(df.shape[0]) > 0.5] = 'a' + for kind in kinds: + with tm.assertRaises(TypeError): + df.plot(kind=kind) def test_invalid_kind(self): df = DataFrame(randn(10, 2)) diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py index c3c1c4f5977e6..986e44ced83a2 100644 --- a/pandas/tests/test_testing.py +++ b/pandas/tests/test_testing.py @@ -8,7 +8,8 @@ import sys from pandas import Series from pandas.util.testing import ( - assert_almost_equal, assertRaisesRegexp, raise_with_traceback, assert_series_equal + assert_almost_equal, assertRaisesRegexp, raise_with_traceback, assert_series_equal, + RNGContext ) # let's get meta. 
@@ -153,3 +154,14 @@ def test_not_equal(self): # ATM meta data is not checked in assert_series_equal # self._assert_not_equal(Series(range(3)),Series(range(3),name='foo'),check_names=True) + +class TestRNGContext(unittest.TestCase): + + def test_RNGContext(self): + expected0 = 1.764052345967664 + expected1 = 1.6243453636632417 + + with RNGContext(0): + with RNGContext(1): + self.assertEqual(np.random.randn(), expected1) + self.assertEqual(np.random.randn(), expected0) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 2860cdf3b200d..8abbb37646b49 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1528,3 +1528,33 @@ def skip_if_no_ne(engine='numexpr'): def disabled(t): t.disabled = True return t + + +class RNGContext(object): + """ + Context manager to set the numpy random number generator speed. Returns + to the original value upon exiting the context manager. + + Parameters + ---------- + seed : int + Seed for numpy.random.seed + + Examples + -------- + + with RNGContext(42): + np.random.randn() + """ + + def __init__(self, seed): + self.seed = seed + + def __enter__(self): + + self.start_state = np.random.get_state() + np.random.seed(self.seed) + + def __exit__(self, exc_type, exc_value, traceback): + + np.random.set_state(self.start_state)
Came up upon merging another PR. See comment https://github.com/pydata/pandas/pull/6644#issuecomment-38057195 The test relies on drawing at least 1 draw out of 10 uniform draws to be larger than 0.5; even if this didn't cause the failure originally, it should still be fixed. I set a seed that's known to produce the desired values.
https://api.github.com/repos/pandas-dev/pandas/pulls/6670
2014-03-19T14:40:29Z
2014-03-19T16:59:43Z
2014-03-19T16:59:43Z
2017-05-15T21:15:52Z
ENH/VIS: Plotting DataFrame/Series with matplotlib.table
diff --git a/doc/source/release.rst b/doc/source/release.rst index a5d41b9f6a4af..6928991e878d8 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -161,6 +161,17 @@ API Changes - Added ``nunique`` and ``value_counts`` functions to ``Index`` for counting unique elements. (:issue:`6734`) +- ``DataFrame.plot`` and ``Series.plot`` now support a ``table`` keyword for plotting ``matplotlib.Table``. The ``table`` kewyword can receive the following values. + + - ``False``: Do nothing (default). + + - ``True``: Draw a table using the ``DataFrame`` or ``Series`` called ``plot`` method. Data will be transposed to meet matplotlib's default layout. + + - ``DataFrame`` or ``Series``: Draw matplotlib.table using the passed data. The data will be drawn as displayed in print method (not transposed automatically). + + Also, helper function ``pandas.tools.plotting.table`` is added to create a table from ``DataFrame`` and ``Series``, and add it to an ``matplotlib.Axes``. + + Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 58eec9fa0f528..42dca9ae86cf8 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -201,6 +201,17 @@ API changes - Added ``nunique`` and ``value_counts`` functions to ``Index`` for counting unique elements. (:issue:`6734`) +- ``DataFrame.plot`` and ``Series.plot`` now support a ``table`` keyword for plotting ``matplotlib.Table``. The ``table`` kewyword can receive the following values. + + - ``False``: Do nothing (default). + + - ``True``: Draw a table using the ``DataFrame`` or ``Series`` called ``plot`` method. Data will be transposed to meet matplotlib's default layout. + + - ``DataFrame`` or ``Series``: Draw matplotlib.table using the passed data. The data will be drawn as displayed in print method (not transposed automatically). + + Also, helper function ``pandas.tools.plotting.table`` is added to create a table from ``DataFrame`` and ``Series``, and add it to an ``matplotlib.Axes``. 
+ + MultiIndexing Using Slicers ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index d05ae4b72c2f1..09decc5ed1e25 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -415,6 +415,48 @@ Here is an example of one way to easily plot group means with standard deviation @savefig errorbar_example.png means.plot(yerr=errors, ax=ax, kind='bar') +Plotting With Table +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. versionadded:: 0.14 + +Plotting with matplotlib table is now supported in the ``DataFrame.plot`` and ``Series.plot`` by a ``table`` keyword. The ``table`` keyword can accept ``bool``, ``DataFrame`` or ``Series``. The simple way to draw a table is to specify ``table=True``. Data will be transposed to meet matplotlib's default layout. + +.. ipython:: python + + fig, ax = plt.subplots(1, 1) + df = DataFrame(rand(5, 3), columns=['a', 'b', 'c']) + ax.get_xaxis().set_visible(False) # Hide Ticks + + @savefig line_plot_table_true.png + df.plot(table=True, ax=ax) + +Also, you can pass different ``DataFrame`` or ``Series`` for ``table`` keyword. The data will be drawn as displayed in print method (not transposed automatically). If required, it should be transposed manually as below example. + +.. ipython:: python + + fig, ax = plt.subplots(1, 1) + ax.get_xaxis().set_visible(False) # Hide Ticks + @savefig line_plot_table_data.png + df.plot(table=np.round(df.T, 2), ax=ax) + + +Finally, there is a helper function ``pandas.tools.plotting.table`` to create a table from ``DataFrame`` and ``Series``, and add it to an ``matplotlib.Axes``. This function can accept keywords which matplotlib table has. + +.. 
ipython:: python + + from pandas.tools.plotting import table + fig, ax = plt.subplots(1, 1) + + table(ax, np.round(df.describe(), 2), + loc='upper right', colWidths=[0.2, 0.2, 0.2]) + + @savefig line_plot_table_describe.png + df.plot(ax=ax, ylim=(0, 2), legend=None) + +**Note**: You can get table instances on the axes using ``axes.tables`` property for further decorations. See the `matplotlib table documenation <http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ for more. + + .. _visualization.scatter_matrix: Scatter plot matrix diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index efefc96b51104..5beb5a05a800d 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -309,6 +309,11 @@ def test_errorbar_plot(self): with tm.assertRaises(TypeError): s.plot(yerr=s_err) + def test_table(self): + _check_plot_works(self.series.plot, table=True) + _check_plot_works(self.series.plot, table=self.series) + + @tm.mplskip class TestDataFramePlots(tm.TestCase): def setUp(self): @@ -1335,6 +1340,18 @@ def test_errorbar_asymmetrical(self): tm.close() + def test_table(self): + df = DataFrame(np.random.rand(10, 3), + index=list(string.ascii_letters[:10])) + _check_plot_works(df.plot, table=True) + _check_plot_works(df.plot, table=df) + + ax = df.plot() + self.assert_(len(ax.tables) == 0) + plotting.table(ax, df.T) + self.assert_(len(ax.tables) == 1) + + @tm.mplskip class TestDataFrameGroupByPlots(tm.TestCase): def tearDown(self): diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index c2a929bab77b5..42135e2186468 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -793,7 +793,8 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True, ax=None, fig=None, title=None, xlim=None, ylim=None, xticks=None, yticks=None, sort_columns=False, fontsize=None, - secondary_y=False, colormap=None, **kwds): + secondary_y=False, colormap=None, + table=False, **kwds): self.data 
= data self.by = by @@ -849,6 +850,8 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True, else: self.colormap = colormap + self.table = table + self.kwds = kwds self._validate_color_args() @@ -915,6 +918,7 @@ def generate(self): self._compute_plot_data() self._setup_subplots() self._make_plot() + self._add_table() self._post_plot_logic() self._adorn_subplots() @@ -1005,6 +1009,21 @@ def _compute_plot_data(self): def _make_plot(self): raise NotImplementedError + def _add_table(self): + if self.table is False: + return + elif self.table is True: + from pandas.core.frame import DataFrame + if isinstance(self.data, Series): + data = DataFrame(self.data, columns=[self.data.name]) + elif isinstance(self.data, DataFrame): + data = self.data + data = data.transpose() + else: + data = self.table + ax = self._get_ax(0) + table(ax, data) + def _post_plot_logic(self): pass @@ -1664,7 +1683,6 @@ def _post_plot_logic(self): for ax in self.axes: ax.legend(loc='best') - class BarPlot(MPLPlot): _default_rot = {'bar': 90, 'barh': 0} @@ -2594,6 +2612,47 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, return fig, axes +def table(ax, data, rowLabels=None, colLabels=None, + **kwargs): + + """ + Helper function to convert DataFrame and Series to matplotlib.table + + Parameters + ---------- + `ax`: Matplotlib axes object + `data`: DataFrame or Series + data for table contents + `kwargs`: keywords, optional + keyword arguments which passed to matplotlib.table.table. + If `rowLabels` or `colLabels` is not specified, data index or column name will be used. 
+ + Returns + ------- + matplotlib table object + """ + from pandas import DataFrame + if isinstance(data, Series): + data = DataFrame(data, columns=[data.name]) + elif isinstance(data, DataFrame): + pass + else: + raise ValueError('Input data must be dataframe or series') + + if rowLabels is None: + rowLabels = data.index + + if colLabels is None: + colLabels = data.columns + + cellText = data.values + + import matplotlib.table + table = matplotlib.table.table(ax, cellText=cellText, + rowLabels=rowLabels, colLabels=colLabels, **kwargs) + return table + + def _get_layout(nplots): if nplots == 1: return (1, 1)
closes #4803 Related to #4803, I prepared some codes to add a `matplotlib.Table` to `DataFrame` and `Series` plot using `table` keyword. `table` can take followings as an input: - `False`: Do nothing (default). - `True`: Draw a table using the data called `plot` method. Data will be transposed to meet the matplotlib's default layout. - `DataFrame` or `Series`: Draw matplotlib.table using the passed data. The data will be drawn as displayed in print method (not transposed automatically). Also, helper function `pandas.tools.plotting.table` is added to create a table from `DataFrame` and `Series`, and add it to the `matplotlib.Axes`. _Example:_ ``` fig, axes = plt.subplots(1, 3, figsize=(14, 4)) plt.subplots_adjust(top=0.97, bottom=0.2, left=0.05, right=0.97, hspace=0.2) df.plot(ax=axes[0], table=True, legend=False) axes[0].get_xaxis().set_visible(False) df.plot(ax=axes[1], table=np.round(df.T, 2), legend=False) axes[1].get_xaxis().set_visible(False) df.plot(ax=axes[2], legend=False) import pandas.tools.plotting as plotting plotting.table(axes[2], np.round(df.describe(), 2), loc='upper right', colWidths=[0.2, 0.2, 0.2]) plt.show() ``` _Outputs:_ ![figure_table](https://f.cloud.github.com/assets/1696302/2448693/7d4028e6-aea9-11e3-9a2e-f36087638dac.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/6661
2014-03-18T14:42:00Z
2014-04-07T15:41:42Z
2014-04-07T15:41:42Z
2014-07-08T21:46:11Z
DOC: Mention date and time formatting available throughout MPL
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 5827f2e971e42..30e0fa9a44d7d 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -617,9 +617,18 @@ be colored differently. Colormaps ~~~~~~~~~ -A potential issue when plotting a large number of columns is that it can be difficult to distinguish some series due to repetition in the default colors. To remedy this, DataFrame plotting supports the use of the ``colormap=`` argument, which accepts either a Matplotlib `colormap <http://matplotlib.org/api/cm_api.html>`__ or a string that is a name of a colormap registered with Matplotlib. A visualization of the default matplotlib colormaps is available `here <http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps>`__. - -As matplotlib does not directly support colormaps for line-based plots, the colors are selected based on an even spacing determined by the number of columns in the DataFrame. There is no consideration made for background color, so some colormaps will produce lines that are not easily visible. +A potential issue when plotting a large number of columns is that it can be +difficult to distinguish some series due to repetition in the default colors. To +remedy this, DataFrame plotting supports the use of the ``colormap=`` argument, +which accepts either a Matplotlib `colormap <http://matplotlib.org/api/cm_api.html>`__ +or a string that is a name of a colormap registered with Matplotlib. A +visualization of the default matplotlib colormaps is available `here +<http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps>`__. + +As matplotlib does not directly support colormaps for line-based plots, the +colors are selected based on an even spacing determined by the number of columns +in the DataFrame. There is no consideration made for background color, so some +colormaps will produce lines that are not easily visible. 
To use the jet colormap, we can simply pass ``'jet'`` to ``colormap=`` @@ -674,7 +683,42 @@ Andrews curves charts: @savefig andrews_curve_winter.png andrews_curves(data, 'Name', colormap='winter') +Plotting directly with matplotlib +--------------------------------- + +In some situations it may still be preferable or necessary to prepare plots +directly with matplotlib, for instance when a certain type of plot or +customization is not (yet) supported by pandas. Series and DataFrame objects +behave like arrays and can therefore be passed directly to matplotlib functions +without explicit casts. + +Pandas also automatically registers formatters and locators that recognize date +indices, thereby extending date and time support to practically all plot types +available in matplotlib. Although this formatting does not provide the same +level of refinement you would get when plotting via pandas, it can be faster +when plotting a large number of points. + +.. note:: + + The speed up for large data sets only applies to pandas 0.14.0 and later. + + +.. ipython:: python + + price = Series(randn(150).cumsum(), + index=date_range('2000-1-1', periods=150, freq='B')) + ma = pd.rolling_mean(price, 20) + mstd = pd.rolling_std(price, 20) + + plt.figure() + + plt.plot(price.index, price, 'k') + plt.plot(ma.index, ma, 'b') + @savefig bollinger.png + plt.fill_between(mstd.index, ma-2*mstd, ma+2*mstd, color='b', alpha=0.2) + .. ipython:: python :suppress: plt.close('all') +
The visualization documentation only mentions plotting using pandas' interface. This patch adds another section mentioning that it is also possible to plot directly with MPL and that date and time formatters are automatically available through all plots in MPL. An example shows matplotlib's errorbar function where points and errors on the x axis are respectively timestamps and offsets.
https://api.github.com/repos/pandas-dev/pandas/pulls/6660
2014-03-18T13:38:31Z
2014-03-24T11:03:56Z
2014-03-24T11:03:56Z
2014-07-05T02:53:02Z
BUG: Bug in compat with np.compress, surfaced in (GH6658)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 7cf2bec0f4144..5510d70c1e0f7 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -126,9 +126,9 @@ API Changes DataFrame returned by ``GroupBy.apply`` (:issue:`6124`). This facilitates ``DataFrame.stack`` operations where the name of the column index is used as the name of the inserted column containing the pivoted data. - -- The :func:`pivot_table`/:meth:`DataFrame.pivot_table` and :func:`crosstab` functions - now take arguments ``index`` and ``columns`` instead of ``rows`` and ``cols``. A + +- The :func:`pivot_table`/:meth:`DataFrame.pivot_table` and :func:`crosstab` functions + now take arguments ``index`` and ``columns`` instead of ``rows`` and ``cols``. A ``FutureWarning`` is raised to alert that the old ``rows`` and ``cols`` arguments will not be supported in a future release (:issue:`5505`) @@ -175,7 +175,7 @@ Improvements to existing features - ``StataWriter`` and ``DataFrame.to_stata`` accept time stamp and data labels (:issue:`6545`) - offset/freq info now in Timestamp __repr__ (:issue:`4553`) - Support passing ``encoding`` with xlwt (:issue:`3710`) -- Performance improvement when converting ``DatetimeIndex`` to floating ordinals +- Performance improvement when converting ``DatetimeIndex`` to floating ordinals using ``DatetimeConverter`` (:issue:`6636`) .. 
_release.bug_fixes-0.14.0: @@ -259,6 +259,7 @@ Bug Fixes - Bug in ``iloc`` indexing when positional indexer matched Int64Index of corresponding axis no reordering happened (:issue:`6612`) - Bug in ``fillna`` with ``limit`` and ``value`` specified - Bug in ``DataFrame.to_stata`` when columns have non-string names (:issue:`4558`) +- Bug in compat with ``np.compress``, surfaced in (:issue:`6658`) pandas 0.13.1 ------------- diff --git a/pandas/core/series.py b/pandas/core/series.py index dd11b7bec9216..60429630eb7d3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -342,6 +342,10 @@ def base(self): def ravel(self, order='C'): return self.values.ravel(order=order) + def compress(self, condition, axis=0, out=None, **kwargs): + # 1-d compat with numpy + return self[condition] + def transpose(self): """ support for compatiblity """ return self diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index b90cdcf55f636..a94ca5dfc1075 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -3436,6 +3436,15 @@ def f(x): s = Series(np.random.randn(10)) tm.assert_almost_equal(s.ravel(order='F'),s.values.ravel(order='F')) + # compress + # GH 6658 + s = Series([0,1.,-1],index=list('abc')) + result = np.compress(s>0,s) + assert_series_equal(result, Series([1.],index=['b'])) + + result = np.compress(s<-1,s) + assert_series_equal(result, Series([],dtype='float64')) + def test_complexx(self): # GH4819
closes #6658
https://api.github.com/repos/pandas-dev/pandas/pulls/6659
2014-03-18T12:18:47Z
2014-03-18T14:39:47Z
2014-03-18T14:39:47Z
2014-06-21T09:06:49Z
REGR: fixing Timestamp/Series subtraction, resolves #6648
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index 19703b9e30ef6..8d9e4d5069f61 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -5,12 +5,13 @@ from pandas import tslib import datetime -from pandas.core.api import Timestamp +from pandas.core.api import Timestamp, Series from pandas.tslib import period_asfreq, period_ordinal from pandas.tseries.index import date_range from pandas.tseries.frequencies import get_freq from pandas import _np_version_under1p7 import pandas.util.testing as tm +from pandas.util.testing import assert_series_equal class TestTimestamp(tm.TestCase): def test_repr(self): @@ -333,6 +334,15 @@ def test_timestamp_and_datetime(self): self.assertEqual((Timestamp(datetime.datetime(2013, 10, 13)) - datetime.datetime(2013, 10, 12)).days, 1) self.assertEqual((datetime.datetime(2013, 10, 12) - Timestamp(datetime.datetime(2013, 10, 13))).days, -1) + def test_timestamp_and_series(self): + timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D', tz='US/Eastern')) + first_timestamp = timestamp_series[0] + + if not _np_version_under1p7: + delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')]) + assert_series_equal(timestamp_series - first_timestamp, delta_series) + assert_series_equal(first_timestamp - timestamp_series, -delta_series) + def test_addition_subtraction_types(self): # Assert on the types resulting from Timestamp +/- various date/time objects datetime_instance = datetime.datetime(2014, 3, 4) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 88559fdfee9de..1270b8ab1923e 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -707,11 +707,12 @@ cdef class _Timestamp(datetime): return result def __sub__(self, other): - if isinstance(other, datetime): - return datetime.__sub__(self, other) + if is_timedelta64_object(other) or is_integer_object(other) \ + or isinstance(other, timedelta) or hasattr(other, 'delta'): + neg_other = -other 
+ return self + neg_other - neg_other = -other - return self + neg_other + return datetime.__sub__(self, other) cpdef _get_field(self, field): out = get_date_field(np.array([self.value], dtype=np.int64), field)
closes #6648. Admittedly the conditional in `__sub__` is a little gross now. I didn't add release notes as this is really just patching up a yet-to-be released item, but please let me know if I should. Also, the test only runs on numpy >= 1.7, is that okay?
https://api.github.com/repos/pandas-dev/pandas/pulls/6657
2014-03-17T23:10:30Z
2014-03-18T10:09:59Z
2014-03-18T10:09:59Z
2014-07-16T08:58:46Z
ENH/VIS: Area plot is now supported by kind='area'.
diff --git a/doc/source/release.rst b/doc/source/release.rst index 4532c1d6eee11..2215b40087969 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -190,6 +190,8 @@ API Changes ``data`` argument (:issue:`5357`) - groupby will now not return the grouped column for non-cython functions (:issue:`5610`), as its already the index +- ``DataFrame.plot`` and ``Series.plot`` now supports area plot with specifying ``kind='area'`` (:issue:`6656`) +- Line plot can be stacked by ``stacked=True``. (:issue:`6656`) Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index ffdd77bab9efd..35f2e29d138b1 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -364,10 +364,12 @@ Plotting ~~~~~~~~ - Hexagonal bin plots from ``DataFrame.plot`` with ``kind='hexbin'`` (:issue:`5478`), See :ref:`the docs<visualization.hexbin>`. +- ``DataFrame.plot`` and ``Series.plot`` now supports area plot with specifying ``kind='area'`` (:issue:`6656`) - Plotting with Error Bars is now supported in the ``.plot`` method of ``DataFrame`` and ``Series`` objects (:issue:`3796`), See :ref:`the docs<visualization.errorbars>`. - ``DataFrame.plot`` and ``Series.plot`` now support a ``table`` keyword for plotting ``matplotlib.Table``, See :ref:`the docs<visualization.table>`. - ``plot(legend='reverse')`` will now reverse the order of legend labels for most plot kinds. (:issue:`6014`) +- Line plot and area plot can be stacked by ``stacked=True`` (:issue:`6656`) - Following keywords are now acceptable for :meth:`DataFrame.plot(kind='bar')` and :meth:`DataFrame.plot(kind='barh')`. 
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 15d05ff046bb1..5255ddf3c33e7 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -461,6 +461,40 @@ Finally, there is a helper function ``pandas.tools.plotting.table`` to create a **Note**: You can get table instances on the axes using ``axes.tables`` property for further decorations. See the `matplotlib table documenation <http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ for more. +.. _visualization.area_plot: + +Area plot +~~~~~~~~~~~~~~~~~~~ + +.. versionadded:: 0.14 + +You can create area plots with ``Series.plot`` and ``DataFrame.plot`` by passing ``kind='area'``. Area plots are stacked by default. To produce stacked area plot, each column must be either all positive or all negative values. + +When input data contains `NaN`, it will be automatically filled by 0. If you want to drop or fill by different values, use :func:`dataframe.dropna` or :func:`dataframe.fillna` before calling `plot`. + +.. ipython:: python + :suppress: + + plt.figure(); + +.. ipython:: python + + df = DataFrame(rand(10, 4), columns=['a', 'b', 'c', 'd']) + + @savefig area_plot_stacked.png + df.plot(kind='area'); + +To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5 unless otherwise specified: + +.. ipython:: python + :suppress: + + plt.figure(); + +.. ipython:: python + + @savefig area_plot_unstacked.png + df.plot(kind='area', stacked=False); .. 
_visualization.scatter_matrix: diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index a1b6c7b7c518e..8b79c9e9d1307 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -14,7 +14,7 @@ import numpy as np from numpy import random -from numpy.random import randn +from numpy.random import rand, randn from numpy.testing import assert_array_equal from numpy.testing.decorators import slow @@ -54,9 +54,10 @@ def test_plot(self): _check_plot_works(self.ts.plot, style='.', logx=True) _check_plot_works(self.ts.plot, style='.', loglog=True) _check_plot_works(self.ts[:10].plot, kind='bar') + _check_plot_works(self.ts.plot, kind='area', stacked=False) _check_plot_works(self.iseries.plot) - for kind in plotting._common_kinds: + for kind in ['line', 'bar', 'barh', 'kde']: _check_plot_works(self.series[:5].plot, kind=kind) _check_plot_works(self.series[:10].plot, kind='barh') @@ -75,6 +76,33 @@ def test_plot_figsize_and_title(self): assert_array_equal(np.round(ax.figure.get_size_inches()), np.array((16., 8.))) + def test_ts_area_lim(self): + ax = self.ts.plot(kind='area', stacked=False) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0]) + self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1]) + + def test_line_area_nan_series(self): + values = [1, 2, np.nan, 3] + s = Series(values) + ts = Series(values, index=tm.makeDateIndex(k=4)) + + for d in [s, ts]: + ax = _check_plot_works(d.plot) + masked = ax.lines[0].get_ydata() + # remove nan for comparison purpose + self.assert_numpy_array_equal(np.delete(masked.data, 2), np.array([1, 2, 3])) + self.assert_numpy_array_equal(masked.mask, np.array([False, False, True, False])) + + expected = np.array([1, 2, 0, 3]) + ax = _check_plot_works(d.plot, stacked=True) + self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + ax = _check_plot_works(d.plot, kind='area') + 
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + ax = _check_plot_works(d.plot, kind='area', stacked=False) + self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + @slow def test_bar_log(self): expected = np.array([1., 10., 100., 1000.]) @@ -500,7 +528,7 @@ def test_subplots(self): df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10])) - for kind in ['bar', 'barh', 'line']: + for kind in ['bar', 'barh', 'line', 'area']: axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True) for ax, column in zip(axes, df.columns): @@ -529,6 +557,104 @@ def test_subplots(self): for ax in axes: self.assertTrue(ax.get_legend() is None) + def test_negative_log(self): + df = - DataFrame(rand(6, 4), + index=list(string.ascii_letters[:6]), + columns=['x', 'y', 'z', 'four']) + + with tm.assertRaises(ValueError): + df.plot(kind='area', logy=True) + with tm.assertRaises(ValueError): + df.plot(kind='area', loglog=True) + + def _compare_stacked_y_cood(self, normal_lines, stacked_lines): + base = np.zeros(len(normal_lines[0].get_data()[1])) + for nl, sl in zip(normal_lines, stacked_lines): + base += nl.get_data()[1] # get y coodinates + sy = sl.get_data()[1] + self.assert_numpy_array_equal(base, sy) + + def test_line_area_stacked(self): + with tm.RNGContext(42): + df = DataFrame(rand(6, 4), + columns=['w', 'x', 'y', 'z']) + neg_df = - df + # each column has either positive or negative value + sep_df = DataFrame({'w': rand(6), 'x': rand(6), + 'y': - rand(6), 'z': - rand(6)}) + # each column has positive-negative mixed value + mixed_df = DataFrame(randn(6, 4), index=list(string.ascii_letters[:6]), + columns=['w', 'x', 'y', 'z']) + + for kind in ['line', 'area']: + ax1 = _check_plot_works(df.plot, kind=kind, stacked=False) + ax2 = _check_plot_works(df.plot, kind=kind, stacked=True) + self._compare_stacked_y_cood(ax1.lines, ax2.lines) + + ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False) + ax2 = 
_check_plot_works(neg_df.plot, kind=kind, stacked=True) + self._compare_stacked_y_cood(ax1.lines, ax2.lines) + + ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False) + ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True) + self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2]) + self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:]) + + _check_plot_works(mixed_df.plot, stacked=False) + with tm.assertRaises(ValueError): + mixed_df.plot(stacked=True) + + _check_plot_works(df.plot, kind=kind, logx=True, stacked=True) + + def test_line_area_nan_df(self): + values1 = [1, 2, np.nan, 3] + values2 = [3, np.nan, 2, 1] + df = DataFrame({'a': values1, 'b': values2}) + tdf = DataFrame({'a': values1, 'b': values2}, index=tm.makeDateIndex(k=4)) + + for d in [df, tdf]: + ax = _check_plot_works(d.plot) + masked1 = ax.lines[0].get_ydata() + masked2 = ax.lines[1].get_ydata() + # remove nan for comparison purpose + self.assert_numpy_array_equal(np.delete(masked1.data, 2), np.array([1, 2, 3])) + self.assert_numpy_array_equal(np.delete(masked2.data, 1), np.array([3, 2, 1])) + self.assert_numpy_array_equal(masked1.mask, np.array([False, False, True, False])) + self.assert_numpy_array_equal(masked2.mask, np.array([False, True, False, False])) + + expected1 = np.array([1, 2, 0, 3]) + expected2 = np.array([3, 0, 2, 1]) + + ax = _check_plot_works(d.plot, stacked=True) + self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2) + + ax = _check_plot_works(d.plot, kind='area') + self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2) + + ax = _check_plot_works(d.plot, kind='area', stacked=False) + self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2) + + def test_area_lim(self): + df = DataFrame(rand(6, 4), + 
columns=['x', 'y', 'z', 'four']) + + neg_df = - df + for stacked in [True, False]: + ax = _check_plot_works(df.plot, kind='area', stacked=stacked) + xmin, xmax = ax.get_xlim() + ymin, ymax = ax.get_ylim() + lines = ax.get_lines() + self.assertEqual(xmin, lines[0].get_data()[0][0]) + self.assertEqual(xmax, lines[0].get_data()[0][-1]) + self.assertEqual(ymin, 0) + + ax = _check_plot_works(neg_df.plot, kind='area', stacked=stacked) + ymin, ymax = ax.get_ylim() + self.assertEqual(ymax, 0) + @slow def test_bar_colors(self): import matplotlib.pyplot as plt @@ -1077,11 +1203,11 @@ def _check_legend_labels(self, ax, labels): @slow def test_df_legend_labels(self): - kinds = 'line', 'bar', 'barh', 'kde', 'density' - df = DataFrame(randn(3, 3), columns=['a', 'b', 'c']) - df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f']) - df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i']) - df4 = DataFrame(randn(3, 3), columns=['j', 'k', 'l']) + kinds = 'line', 'bar', 'barh', 'kde', 'density', 'area' + df = DataFrame(rand(3, 3), columns=['a', 'b', 'c']) + df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f']) + df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i']) + df4 = DataFrame(rand(3, 3), columns=['j', 'k', 'l']) for kind in kinds: ax = df.plot(kind=kind, legend=True) @@ -1170,6 +1296,22 @@ def test_style_by_column(self): for i, l in enumerate(ax.get_lines()[:len(markers)]): self.assertEqual(l.get_marker(), markers[i]) + def check_line_colors(self, colors, lines): + for i, l in enumerate(lines): + xp = colors[i] + rs = l.get_color() + self.assertEqual(xp, rs) + + def check_collection_colors(self, colors, cols): + from matplotlib.colors import ColorConverter + conv = ColorConverter() + for i, c in enumerate(cols): + xp = colors[i] + xp = conv.to_rgba(xp) + rs = c.get_facecolor()[0] + for x, y in zip(xp, rs): + self.assertEqual(x, y) + @slow def test_line_colors(self): import matplotlib.pyplot as plt @@ -1177,16 +1319,10 @@ def test_line_colors(self): from matplotlib import cm 
custom_colors = 'rgcby' - df = DataFrame(randn(5, 5)) ax = df.plot(color=custom_colors) - - lines = ax.get_lines() - for i, l in enumerate(lines): - xp = custom_colors[i] - rs = l.get_color() - self.assertEqual(xp, rs) + self.check_line_colors(custom_colors, ax.get_lines()) tmp = sys.stderr sys.stderr = StringIO() @@ -1194,7 +1330,7 @@ def test_line_colors(self): tm.close() ax2 = df.plot(colors=custom_colors) lines2 = ax2.get_lines() - for l1, l2 in zip(lines, lines2): + for l1, l2 in zip(ax.get_lines(), lines2): self.assertEqual(l1.get_color(), l2.get_color()) finally: sys.stderr = tmp @@ -1204,30 +1340,45 @@ def test_line_colors(self): ax = df.plot(colormap='jet') rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) - - lines = ax.get_lines() - for i, l in enumerate(lines): - xp = rgba_colors[i] - rs = l.get_color() - self.assertEqual(xp, rs) + self.check_line_colors(rgba_colors, ax.get_lines()) tm.close() ax = df.plot(colormap=cm.jet) rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) - - lines = ax.get_lines() - for i, l in enumerate(lines): - xp = rgba_colors[i] - rs = l.get_color() - self.assertEqual(xp, rs) + self.check_line_colors(rgba_colors, ax.get_lines()) # make color a list if plotting one column frame # handles cases like df.plot(color='DodgerBlue') tm.close() df.ix[:, [0]].plot(color='DodgerBlue') + @slow + def test_area_colors(self): + from matplotlib import cm + from matplotlib.collections import PolyCollection + + custom_colors = 'rgcby' + df = DataFrame(rand(5, 5)) + + ax = df.plot(kind='area', color=custom_colors) + self.check_line_colors(custom_colors, ax.get_lines()) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + self.check_collection_colors(custom_colors, poly) + + ax = df.plot(kind='area', colormap='jet') + rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) + self.check_line_colors(rgba_colors, ax.get_lines()) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + 
self.check_collection_colors(rgba_colors, poly) + + ax = df.plot(kind='area', colormap=cm.jet) + rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) + self.check_line_colors(rgba_colors, ax.get_lines()) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + self.check_collection_colors(rgba_colors, poly) + def test_default_color_cycle(self): import matplotlib.pyplot as plt plt.rcParams['axes.color_cycle'] = list('rgbk') @@ -1268,6 +1419,15 @@ def test_partially_invalid_plot_data(self): with tm.assertRaises(TypeError): df.plot(kind=kind) + with tm.RNGContext(42): + # area plot doesn't support positive/negative mixed data + kinds = ['area'] + df = DataFrame(rand(10, 2), dtype=object) + df[np.random.rand(df.shape[0]) > 0.5] = 'a' + for kind in kinds: + with tm.assertRaises(TypeError): + df.plot(kind=kind) + def test_invalid_kind(self): df = DataFrame(randn(10, 2)) with tm.assertRaises(ValueError): @@ -1671,6 +1831,7 @@ def _check_plot_works(f, *args, **kwargs): plt.savefig(path) finally: tm.close(fig) + return ret def curpath(): diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index d79177e3db0d3..ab3717d52e4f2 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1477,6 +1477,10 @@ def _post_plot_logic(self): class LinePlot(MPLPlot): def __init__(self, data, **kwargs): + self.stacked = kwargs.pop('stacked', False) + if self.stacked: + data = data.fillna(value=0) + MPLPlot.__init__(self, data, **kwargs) self.x_compat = plot_params['x_compat'] if 'x_compat' in self.kwds: @@ -1529,9 +1533,15 @@ def _use_dynamic_x(self): return (freq is not None) and self._is_dynamic_freq(freq) - def _make_plot(self): + def _is_ts_plot(self): # this is slightly deceptive - if not self.x_compat and self.use_index and self._use_dynamic_x(): + return not self.x_compat and self.use_index and self._use_dynamic_x() + + def _make_plot(self): + self._pos_prior = np.zeros(len(self.data)) + self._neg_prior = np.zeros(len(self.data)) + + if 
self._is_ts_plot(): data = self._maybe_convert_index(self.data) self._make_ts_plot(data) else: @@ -1553,55 +1563,75 @@ def _make_plot(self): if err_kw in kwds: if isinstance(kwds[err_kw], (DataFrame, dict)): if label in kwds[err_kw].keys(): - kwds[err_kw] = kwds[err_kw][label] + kwds[err_kw] = kwds[err_kw][label] else: del kwds[err_kw] elif kwds[err_kw] is not None: kwds[err_kw] = kwds[err_kw][i] label = com.pprint_thing(label) # .encode('utf-8') + kwds['label'] = label - mask = com.isnull(y) - if mask.any(): - y = np.ma.array(y) - y = np.ma.masked_where(mask, y) + y_values = self._get_stacked_values(y, label) + + if not self.stacked: + mask = com.isnull(y_values) + if mask.any(): + y_values = np.ma.array(y_values) + y_values = np.ma.masked_where(mask, y_values) - kwds['label'] = label # prevent style kwarg from going to errorbar, where it is unsupported - if style is not None and plotf.__name__=='plot': - args = (ax, x, y, style) + if style is not None and plotf.__name__ != 'errorbar': + args = (ax, x, y_values, style) else: - args = (ax, x, y) + args = (ax, x, y_values) newlines = plotf(*args, **kwds) - self._add_legend_handle(newlines[0], label, index=i) lines.append(newlines[0]) - if self._is_datetype(): - left, right = _get_xlim(lines) - ax.set_xlim(left, right) + if self.stacked and not self.subplots: + if (y >= 0).all(): + self._pos_prior += y + elif (y <= 0).all(): + self._neg_prior += y + + if self._is_datetype(): + left, right = _get_xlim(lines) + ax.set_xlim(left, right) + + def _get_stacked_values(self, y, label): + if self.stacked: + if (y >= 0).all(): + return self._pos_prior + y + elif (y <= 0).all(): + return self._neg_prior + y + else: + raise ValueError('When stacked is True, each column must be either all positive or negative.' 
+ '{0} contains both positive and negative values'.format(label)) + else: + return y - def _make_ts_plot(self, data, **kwargs): + def _get_ts_plot_function(self): from pandas.tseries.plotting import tsplot - from pandas.core.frame import DataFrame - - kwargs = kwargs.copy() - colors = self._get_colors() - plotf = self._get_plot_function() - - def _plot(data, col_num, ax, label, style, **kwds): - - if plotf.__name__=='plot': - newlines = tsplot(data, plotf, ax=ax, label=label, - style=style, **kwds) + + def _plot(data, ax, label, style, **kwds): # errorbar function does not support style argument - elif plotf.__name__=='errorbar': - newlines = tsplot(data, plotf, ax=ax, label=label, - **kwds) + if plotf.__name__ == 'errorbar': + lines = tsplot(data, plotf, ax=ax, label=label, + **kwds) + return lines + else: + lines = tsplot(data, plotf, ax=ax, label=label, + style=style, **kwds) + return lines + return _plot - self._add_legend_handle(newlines[0], label, index=col_num) + def _make_ts_plot(self, data, **kwargs): + from pandas.core.frame import DataFrame + colors = self._get_colors() + plotf = self._get_ts_plot_function() it = self._iter_data(data=data, keep_index=True) for i, (label, y) in enumerate(it): @@ -1622,7 +1652,17 @@ def _plot(data, col_num, ax, label, style, **kwds): kwds['yerr'] = yerr[i] label = com.pprint_thing(label) - _plot(y, i, ax, label, style, **kwds) + + y_values = self._get_stacked_values(y, label) + + newlines = plotf(y_values, ax, label, style, **kwds) + self._add_legend_handle(newlines[0], label, index=i) + + if self.stacked and not self.subplots: + if (y >= 0).all(): + self._pos_prior += y + elif (y <= 0).all(): + self._neg_prior += y def _maybe_convert_index(self, data): # tsplot converts automatically, but don't want to convert index @@ -1676,6 +1716,76 @@ def _post_plot_logic(self): ax.set_xlabel(index_name) +class AreaPlot(LinePlot): + + def __init__(self, data, **kwargs): + kwargs.setdefault('stacked', True) + data = 
data.fillna(value=0) + LinePlot.__init__(self, data, **kwargs) + + if not self.stacked: + # use smaller alpha to distinguish overlap + self.kwds.setdefault('alpha', 0.5) + + def _get_plot_function(self): + if self.logy or self.loglog: + raise ValueError("Log-y scales are not supported in area plot") + else: + f = LinePlot._get_plot_function(self) + + def plotf(*args, **kwds): + lines = f(*args, **kwds) + + # insert fill_between starting point + y = args[2] + if (y >= 0).all(): + start = self._pos_prior + elif (y <= 0).all(): + start = self._neg_prior + else: + start = np.zeros(len(y)) + + # get x data from the line + # to retrieve x coodinates of tsplot + xdata = lines[0].get_data()[0] + # remove style + args = (args[0], xdata, start, y) + + if not 'color' in kwds: + kwds['color'] = lines[0].get_color() + + self.plt.Axes.fill_between(*args, **kwds) + return lines + + return plotf + + def _add_legend_handle(self, handle, label, index=None): + from matplotlib.patches import Rectangle + # Because fill_between isn't supported in legend, + # specifically add Rectangle handle here + alpha = self.kwds.get('alpha', 0.5) + handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha) + LinePlot._add_legend_handle(self, handle, label, index=index) + + def _post_plot_logic(self): + LinePlot._post_plot_logic(self) + + if self._is_ts_plot(): + pass + else: + if self.xlim is None: + for ax in self.axes: + ax.set_xlim(0, len(self.data)-1) + + if self.ylim is None: + if (self.data >= 0).all().all(): + for ax in self.axes: + ax.set_ylim(0, None) + elif (self.data <= 0).all().all(): + for ax in self.axes: + ax.set_ylim(None, 0) + + class BarPlot(MPLPlot): _default_rot = {'bar': 90, 'barh': 0} @@ -1827,14 +1937,15 @@ class HistPlot(MPLPlot): pass # kinds supported by both dataframe and series -_common_kinds = ['line', 'bar', 'barh', 'kde', 'density'] +_common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area'] # kinds supported by dataframe _dataframe_kinds = ['scatter', 
'hexbin'] _all_kinds = _common_kinds + _dataframe_kinds _plot_klass = {'line': LinePlot, 'bar': BarPlot, 'barh': BarPlot, 'kde': KdePlot, - 'scatter': ScatterPlot, 'hexbin': HexBinPlot} + 'scatter': ScatterPlot, 'hexbin': HexBinPlot, + 'area': AreaPlot} def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, @@ -1879,12 +1990,14 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, ax : matplotlib axis object, default None style : list or dict matplotlib line style per column - kind : {'line', 'bar', 'barh', 'kde', 'density', 'scatter', 'hexbin'} + kind : {'line', 'bar', 'barh', 'kde', 'density', 'area', scatter', 'hexbin'} + line : line plot bar : vertical bar plot barh : horizontal bar plot kde/density : Kernel Density Estimation plot - scatter: scatter plot - hexbin: hexbin plot + area : area plot + scatter : scatter plot + hexbin : hexbin plot logx : boolean, default False Use log scaling on x axis logy : boolean, default False @@ -2002,10 +2115,12 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None, Parameters ---------- label : label argument to provide to plot - kind : {'line', 'bar', 'barh', 'kde', 'density'} + kind : {'line', 'bar', 'barh', 'kde', 'density', 'area'} + line : line plot bar : vertical bar plot barh : horizontal bar plot kde/density : Kernel Density Estimation plot + area : area plot use_index : boolean, default True Plot index as axis tick labels rot : int, default None
Area plot is added to plotting method. The AreaPlot class is created as a subclass of LinePlot, thus it works also in time series. By default, area plot is being stacked. When area plot is not stacked (`stacked=False`), alpha value is set to 0.5 to show overlapped area if not configured specifically. As a side benefit, line plot also can be stacked by specifying `stacked=True` (disabled by default). Different from stacked bar plot, I don't know good visualization for positive/negative mixed data. Thus, input must be all positive or all negative when `stacked=True`. I'll try to implement it if there is a good way. Also, area plot doesn't support logy and loglog plot because filling area starts from 0. Note: Area plot's legend is implemented based on the answer described in: http://stackoverflow.com/questions/14534130/legend-not-showing-up-in-matplotlib-stacked-area-plot _Example:_ ![figure_1](https://f.cloud.github.com/assets/1696302/2436288/dee8f4e6-add8-11e3-9297-a7403935844d.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/6656
2014-03-17T13:38:17Z
2014-05-01T15:14:42Z
2014-05-01T15:14:42Z
2014-06-12T05:20:06Z
DOC/API: pd.Grouper docs / api
diff --git a/doc/source/api.rst b/doc/source/api.rst index 811301a6bbbca..1c80712e82d49 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1193,6 +1193,7 @@ Indexing, iteration GroupBy.groups GroupBy.indices GroupBy.get_group + Grouper Function application ~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/release.rst b/doc/source/release.rst index c0415a350515f..bc4807a293d12 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -132,7 +132,7 @@ API Changes ``FutureWarning`` is raised to alert that the old ``rows`` and ``cols`` arguments will not be supported in a future release (:issue:`5505`) -- Allow specification of a more complex groupby, via ``pd.Groupby`` (:issue:`3794`) +- Allow specification of a more complex groupby, via ``pd.Grouper`` (:issue:`3794`) - A tuple passed to ``DataFame.sort_index`` will be interpreted as the levels of the index, rather than requiring a list of tuple (:issue:`4370`) diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 54995fc8daeb5..ea321cbab545a 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -94,7 +94,7 @@ These are out-of-bounds selections g.nth(0, dropna='any') # similar to old behaviour -- Allow specification of a more complex groupby via ``pd.Groupby``, such as grouping +- Allow specification of a more complex groupby via ``pd.Grouper``, such as grouping by a Time and a string field simultaneously. See :ref:`the docs <groupby.specify>`. 
(:issue:`3794`) - Local variable usage has changed in diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 683c07b70d0f2..15e6381cbe2fa 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -141,14 +141,26 @@ def _last(x): class Grouper(object): """ - A Grouper allows the user to specify a groupby instruction + A Grouper allows the user to specify a groupby instruction for a target object + + This specification will select a column via the key parameter, or if the level and/or + axis parameters are given, a level of the index of the target object. + + These are local specifications and will override 'global' settings, that is the parameters + axis and level which are passed to the groupby itself. Parameters ---------- - key : groupby key, default None - level : name, int level number, default None - freq : string / freqency object, default None - sort : boolean, whether to sort the resulting labels, default True + key : string, defaults to None + groupby key, which selects the grouping column of the target + level : name/number, defaults to None + the level for the target index + freq : string / freqency object, defaults to None + This will groupby the specified frequency if the target selection (via key or level) is + a datetime-like object + axis : number/name of the axis, defaults to None + sort : boolean, default to False + whether to sort the resulting labels Returns ------- @@ -156,10 +168,10 @@ class Grouper(object): Examples -------- - df.groupby(Group(key='A')) : syntatic sugar for df.groupby('A') - df.groupby(Group(key='date',freq='60s')) : specify a resample on the column 'date' - df.groupby(Group(level='date',freq='60s',axis=1)) : - specify a resample on the level 'date' on the columns axis with a frequency of 60s + >>> df.groupby(Grouper(key='A')) : syntatic sugar for df.groupby('A') + >>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date' + >>> 
df.groupby(Grouper(level='date',freq='60s',axis=1)) : + specify a resample on the level 'date' on the columns axis with a frequency of 60s """ @@ -186,7 +198,7 @@ def __init__(self, key=None, level=None, freq=None, axis=None, sort=False): def ax(self): return self.grouper - def get_grouper(self, obj): + def _get_grouper(self, obj): """ Parameters @@ -198,10 +210,10 @@ def get_grouper(self, obj): a tuple of binner, grouper, obj (possibly sorted) """ - self.set_grouper(obj) + self._set_grouper(obj) return self.binner, self.grouper, self.obj - def set_grouper(self, obj, sort=False): + def _set_grouper(self, obj, sort=False): """ given an object and the specifcations, setup the internal grouper for this particular specification @@ -252,7 +264,7 @@ def set_grouper(self, obj, sort=False): self.grouper = ax return self.grouper - def get_binner_for_grouping(self, obj): + def _get_binner_for_grouping(self, obj): raise NotImplementedError @property @@ -1685,7 +1697,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, elif isinstance(self.grouper, Grouper): # get the new grouper - grouper = self.grouper.get_binner_for_grouping(self.obj) + grouper = self.grouper._get_binner_for_grouping(self.obj) self.obj = self.grouper.obj self.grouper = grouper if self.name is None: @@ -1795,7 +1807,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True): # a passed in Grouper, directly convert if isinstance(key, Grouper): - binner, grouper, obj = key.get_grouper(obj) + binner, grouper, obj = key._get_grouper(obj) return grouper, [], obj # already have a BaseGrouper, just return it diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index b29f67b40894b..51144cb3bba2c 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -76,7 +76,7 @@ def __init__(self, freq='Min', closed=None, label=None, how='mean', super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs) def resample(self, obj): - self.set_grouper(obj, 
sort=True) + self._set_grouper(obj, sort=True) ax = self.grouper if isinstance(ax, DatetimeIndex): @@ -93,7 +93,7 @@ def resample(self, obj): rs = self._resample_periods() else: obj = self.obj.to_timestamp(how=self.convention) - self.set_grouper(obj) + self._set_grouper(obj) rs = self._resample_timestamps() elif len(ax) == 0: return self.obj @@ -104,11 +104,11 @@ def resample(self, obj): rs_axis.name = ax.name return rs - def get_grouper(self, obj): - self.set_grouper(obj) - return self.get_binner_for_resample() + def _get_grouper(self, obj): + self._set_grouper(obj) + return self._get_binner_for_resample() - def get_binner_for_resample(self): + def _get_binner_for_resample(self): # create the BinGrouper # assume that self.set_grouper(obj) has already been called @@ -121,12 +121,12 @@ def get_binner_for_resample(self): self.grouper = BinGrouper(bins, binlabels) return self.binner, self.grouper, self.obj - def get_binner_for_grouping(self, obj): + def _get_binner_for_grouping(self, obj): # return an ordering of the transformed group labels, # suitable for multi-grouping, e.g the labels for # the resampled intervals - ax = self.set_grouper(obj) - self.get_binner_for_resample() + ax = self._set_grouper(obj) + self._get_binner_for_resample() # create the grouper binner = self.binner @@ -233,7 +233,7 @@ def _resample_timestamps(self): # assumes set_grouper(obj) already called axlabels = self.ax - self.get_binner_for_resample() + self._get_binner_for_resample() grouper = self.grouper binner = self.binner obj = self.obj diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 20c6724726955..242d656b8794f 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -1134,7 +1134,7 @@ def test_apply_iteration(self): df = DataFrame({'open': 1, 'close': 2}, index=ind) tg = TimeGrouper('M') - _, grouper, _ = tg.get_grouper(df) + _, grouper, _ = tg._get_grouper(df) # Errors grouped = 
df.groupby(grouper, group_keys=False) @@ -1151,7 +1151,7 @@ def test_panel_aggregation(self): minor_axis=['A', 'B', 'C', 'D']) tg = TimeGrouper('M', axis=1) - _, grouper, _ = tg.get_grouper(wp) + _, grouper, _ = tg._get_grouper(wp) bingrouped = wp.groupby(grouper) binagg = bingrouped.mean()
address comments from #6516 docs and api
https://api.github.com/repos/pandas-dev/pandas/pulls/6655
2014-03-17T13:23:26Z
2014-03-17T13:43:57Z
2014-03-17T13:43:57Z
2014-07-08T16:11:44Z
DOC: some minor doc fixes
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index 7fc6f6d197dff..d6c17a3066b86 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -571,7 +571,7 @@ We can produce pivot tables from this data very easily: .. ipython:: python - pd.pivot_table(df, values='D', rows=['A', 'B'], cols=['C']) + pd.pivot_table(df, values='D', index=['A', 'B'], columns=['C']) Time Series diff --git a/doc/source/release.rst b/doc/source/release.rst index 369f83066ed0d..c0415a350515f 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -118,6 +118,7 @@ API Changes - Better propagation/preservation of Series names when performing groupby operations: + - ``SeriesGroupBy.agg`` will ensure that the name attribute of the original series is propagated to the result (:issue:`6265`). - If the function provided to ``GroupBy.apply`` returns a named series, the @@ -221,7 +222,7 @@ Bug Fixes - Bug in :meth:`DataFrame.replace` where nested dicts were erroneously depending on the order of dictionary keys and values (:issue:`5338`). - Perf issue in concatting with empty objects (:issue:`3259`) -- Clarify sorting of ``sym_diff`` on ``Index``es with ``NaN``s (:issue:`6444`) +- Clarify sorting of ``sym_diff`` on ``Index`` objects with ``NaN`` values (:issue:`6444`) - Regression in ``MultiIndex.from_product`` with a ``DatetimeIndex`` as input (:issue:`6439`) - Bug in ``str.extract`` when passed a non-default index (:issue:`6348`) - Bug in ``str.split`` when passed ``pat=None`` and ``n=1`` (:issue:`6466`)
- some sphinx format tweaks in release.rst - use new api in 10min.rst
https://api.github.com/repos/pandas-dev/pandas/pulls/6653
2014-03-16T23:14:57Z
2014-03-16T23:15:45Z
2014-03-16T23:15:45Z
2014-07-16T08:58:36Z
BUG: disallow mixed dtype operations in eval/query
diff --git a/doc/source/release.rst b/doc/source/release.rst index 369f83066ed0d..def97ed41b906 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -136,6 +136,8 @@ API Changes - A tuple passed to ``DataFame.sort_index`` will be interpreted as the levels of the index, rather than requiring a list of tuple (:issue:`4370`) +- Fix a bug where invalid eval/query operations would blow the stack (:issue:`5198`) + Deprecations ~~~~~~~~~~~~ diff --git a/pandas/computation/expr.py b/pandas/computation/expr.py index 1c40dc9930856..353c58c23febd 100644 --- a/pandas/computation/expr.py +++ b/pandas/computation/expr.py @@ -377,6 +377,11 @@ def _possibly_evaluate_binop(self, op, op_class, lhs, rhs, '<=', '>=')): res = op(lhs, rhs) + if res.has_invalid_return_type: + raise TypeError("unsupported operand type(s) for {0}:" + " '{1}' and '{2}'".format(res.op, lhs.type, + rhs.type)) + if self.engine != 'pytables': if (res.op in _cmp_ops_syms and getattr(lhs, 'is_datetime', False) diff --git a/pandas/computation/ops.py b/pandas/computation/ops.py index 041ab77bb61f4..1f57c459149ad 100644 --- a/pandas/computation/ops.py +++ b/pandas/computation/ops.py @@ -169,7 +169,7 @@ def name(self): class Op(StringMixin): - """Hold an operator of unknown arity + """Hold an operator of arbitrary arity """ def __init__(self, op, operands, *args, **kwargs): @@ -195,6 +195,16 @@ def return_type(self): return np.bool_ return _result_type_many(*(term.type for term in com.flatten(self))) + @property + def has_invalid_return_type(self): + types = self.operand_types + obj_dtype_set = frozenset([np.dtype('object')]) + return self.return_type == object and types - obj_dtype_set + + @property + def operand_types(self): + return frozenset(term.type for term in com.flatten(self)) + @property def isscalar(self): return all(operand.isscalar for operand in self.operands) @@ -412,6 +422,10 @@ def _disallow_scalar_only_bool_ops(self): raise NotImplementedError("cannot evaluate scalar only bool ops") 
+def isnumeric(dtype): + return issubclass(np.dtype(dtype).type, np.number) + + class Div(BinOp): """Div operator to special case casting. @@ -428,6 +442,12 @@ class Div(BinOp): def __init__(self, lhs, rhs, truediv, *args, **kwargs): super(Div, self).__init__('/', lhs, rhs, *args, **kwargs) + if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type): + raise TypeError("unsupported operand type(s) for {0}:" + " '{1}' and '{2}'".format(self.op, + lhs.return_type, + rhs.return_type)) + if truediv or PY3: _cast_inplace(com.flatten(self), np.float_) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 8a4207da76c52..4391de0ebfe58 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -13243,6 +13243,16 @@ def test_bool_arith_expr(self): expect = self.frame.a[self.frame.a < 1] + self.frame.b assert_series_equal(res, expect) + def test_invalid_type_for_operator_raises(self): + df = DataFrame({'a': [1, 2], 'b': ['c', 'd']}) + ops = '+', '-', '*', '/' + for op in ops: + with tm.assertRaisesRegexp(TypeError, + "unsupported operand type\(s\) for " + ".+: '.+' and '.+'"): + df.eval('a {0} b'.format(op), engine=self.engine, + parser=self.parser) + class TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas):
closes #5198
https://api.github.com/repos/pandas-dev/pandas/pulls/6652
2014-03-16T21:32:51Z
2014-03-17T14:18:12Z
2014-03-17T14:18:12Z
2014-06-21T20:57:48Z
TST: skip sql tests if connection to server fails
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 46b5758904315..9ecb605def400 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -705,11 +705,14 @@ def setUp(self): try: import pymysql self.driver = pymysql - except ImportError: - raise nose.SkipTest + raise nose.SkipTest('pymysql not installed') + + try: + self.conn = self.connect() + except self.driver.err.OperationalError: + raise nose.SkipTest("Can't connect to MySQL server") - self.conn = self.connect() self.pandasSQL = sql.PandasSQLLegacy(self.conn, 'mysql') self._load_iris_data() @@ -725,53 +728,55 @@ def tearDown(self): class TestMySQLAlchemy(_TestSQLAlchemy): - flavor = 'mysql' - - def connect(self): - return sqlalchemy.create_engine( - 'mysql+{driver}://root@localhost/pandas_nosetest'.format(driver=self.driver)) + flavor = 'mysql' - def setUp(self): - if not SQLALCHEMY_INSTALLED: - raise nose.SkipTest('SQLAlchemy not installed') + def connect(self): + return sqlalchemy.create_engine( + 'mysql+{driver}://root@localhost/pandas_nosetest'.format(driver=self.driver)) - try: - import pymysql - self.driver = 'pymysql' + def setUp(self): + if not SQLALCHEMY_INSTALLED: + raise nose.SkipTest('SQLAlchemy not installed') - except ImportError: - raise nose.SkipTest + try: + import pymysql + self.driver = 'pymysql' + except ImportError: + raise nose.SkipTest('pymysql not installed') + try: self.conn = self.connect() self.pandasSQL = sql.PandasSQLAlchemy(self.conn) + except sqlalchemy.exc.OperationalError: + raise nose.SkipTest("Can't connect to MySQL server") - self._load_iris_data() - self._load_raw_sql() + self._load_iris_data() + self._load_raw_sql() - self._load_test1_data() + self._load_test1_data() + + def tearDown(self): + c = self.conn.execute('SHOW TABLES') + for table in c.fetchall(): + self.conn.execute('DROP TABLE %s' % table[0]) - def tearDown(self): - c = self.conn.execute('SHOW TABLES') - for table in c.fetchall(): - self.conn.execute('DROP TABLE %s' 
% table[0]) + def test_default_type_conversion(self): + df = sql.read_table("types_test_data", self.conn) - def test_default_type_conversion(self): - df = sql.read_table("types_test_data", self.conn) - - self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating), - "FloatCol loaded with incorrect type") - self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer), - "IntCol loaded with incorrect type") - # MySQL has no real BOOL type (it's an alias for TINYINT) - self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer), - "BoolCol loaded with incorrect type") - - # Int column with NA values stays as float - self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating), - "IntColWithNull loaded with incorrect type") - # Bool column with NA = int column with NA values => becomes float - self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating), - "BoolColWithNull loaded with incorrect type") + self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating), + "FloatCol loaded with incorrect type") + self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer), + "IntCol loaded with incorrect type") + # MySQL has no real BOOL type (it's an alias for TINYINT) + self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer), + "BoolCol loaded with incorrect type") + + # Int column with NA values stays as float + self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating), + "IntColWithNull loaded with incorrect type") + # Bool column with NA = int column with NA values => becomes float + self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating), + "BoolColWithNull loaded with incorrect type") class TestPostgreSQLAlchemy(_TestSQLAlchemy): @@ -780,26 +785,28 @@ class TestPostgreSQLAlchemy(_TestSQLAlchemy): def connect(self): return sqlalchemy.create_engine( 'postgresql+{driver}://postgres@localhost/pandas_nosetest'.format(driver=self.driver)) - + def setUp(self): if not SQLALCHEMY_INSTALLED: raise 
nose.SkipTest('SQLAlchemy not installed') - + try: import psycopg2 self.driver = 'psycopg2' - except ImportError: - raise nose.SkipTest - - self.conn = self.connect() - self.pandasSQL = sql.PandasSQLAlchemy(self.conn) - + raise nose.SkipTest('psycopg2 not installed') + + try: + self.conn = self.connect() + self.pandasSQL = sql.PandasSQLAlchemy(self.conn) + except sqlalchemy.exc.OperationalError: + raise nose.SkipTest("Can't connect to PostgreSQL server") + self._load_iris_data() self._load_raw_sql() - + self._load_test1_data() - + def tearDown(self): c = self.conn.execute( "SELECT table_name FROM information_schema.tables"
Further work on #6292, non-controversial part of #6627.
https://api.github.com/repos/pandas-dev/pandas/pulls/6651
2014-03-16T11:11:01Z
2014-03-16T11:12:29Z
2014-03-16T11:12:29Z
2014-07-03T07:26:40Z
PERF: Speed up DatetimeConverter by using Matplotlib's epoch2num when possible...
diff --git a/doc/source/release.rst b/doc/source/release.rst index 2583b47d9b3bf..6f9aa1c01fc37 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -159,6 +159,8 @@ Improvements to existing features - ``StataWriter`` and ``DataFrame.to_stata`` accept time stamp and data labels (:issue:`6545`) - offset/freq info now in Timestamp __repr__ (:issue:`4553`) - Support passing ``encoding`` with xlwt (:issue:`3710`) +- Performance improvement when converting ``DatetimeIndex`` to floating ordinals + using ``DatetimeConverter`` (:issue:`6636`) .. _release.bug_fixes-0.14.0: diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py index d059d229ef22e..b9939976fded8 100644 --- a/pandas/tseries/converter.py +++ b/pandas/tseries/converter.py @@ -16,6 +16,7 @@ import pandas.core.common as com from pandas.core.index import Index +from pandas.core.series import Series from pandas.tseries.index import date_range import pandas.tseries.tools as tools import pandas.tseries.frequencies as frequencies @@ -144,7 +145,10 @@ def _dt_to_float_ordinal(dt): preserving hours, minutes, seconds and microseconds. Return value is a :func:`float`. 
""" - base = dates.date2num(dt) + if isinstance(dt, (np.ndarray, Series)) and com.is_datetime64_ns_dtype(dt): + base = dates.epoch2num(dt.asi8 / 1.0E9) + else: + base = dates.date2num(dt) return base diff --git a/pandas/tseries/tests/test_converter.py b/pandas/tseries/tests/test_converter.py index 29137f9cb3e50..902b9cb549e32 100644 --- a/pandas/tseries/tests/test_converter.py +++ b/pandas/tseries/tests/test_converter.py @@ -5,8 +5,11 @@ import nose import numpy as np +from numpy.testing import assert_almost_equal as np_assert_almost_equal +from pandas import Timestamp from pandas.compat import u import pandas.util.testing as tm +from pandas.tseries.offsets import Second, Milli, Micro try: import pandas.tseries.converter as converter @@ -46,9 +49,48 @@ def test_conversion(self): rs = self.dtc.convert('2012-1-1', None, None) self.assertEqual(rs, xp) + rs = self.dtc.convert(Timestamp('2012-1-1'), None, None) + self.assertEqual(rs, xp) + + def test_conversion_float(self): + decimals = 9 + + rs = self.dtc.convert(Timestamp('2012-1-1 01:02:03', tz='UTC'), None, None) + xp = converter.dates.date2num(Timestamp('2012-1-1 01:02:03', tz='UTC')) + np_assert_almost_equal(rs, xp, decimals) + + rs = self.dtc.convert(Timestamp('2012-1-1 09:02:03', tz='Asia/Hong_Kong'), None, None) + np_assert_almost_equal(rs, xp, decimals) + + rs = self.dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None) + np_assert_almost_equal(rs, xp, decimals) + def test_time_formatter(self): self.tc(90000) + def test_dateindex_conversion(self): + decimals = 9 + + for freq in ('B', 'L', 'S'): + dateindex = tm.makeDateIndex(k = 10, freq = freq) + rs = self.dtc.convert(dateindex, None, None) + xp = converter.dates.date2num(dateindex) + np_assert_almost_equal(rs, xp, decimals) + + def test_resolution(self): + def _assert_less(ts1, ts2): + val1 = self.dtc.convert(ts1, None, None) + val2 = self.dtc.convert(ts2, None, None) + if not val1 < val2: + raise AssertionError('{0} is not less than {1}.'.format(val1, 
val2)) + + # Matplotlib's time representation using floats cannot distinguish intervals smaller + # than ~10 microsecond in the common range of years. + ts = Timestamp('2012-1-1') + _assert_less(ts, ts + Second()) + _assert_less(ts, ts + Milli()) + _assert_less(ts, ts + Micro(50)) + if __name__ == '__main__': import nose diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py index c43d2fb76dbdb..93821c3be3c2c 100644 --- a/vb_suite/timeseries.py +++ b/vb_suite/timeseries.py @@ -269,3 +269,15 @@ def date_range(start=None, end=None, periods=None, freq=None): dataframe_resample_max_numpy = \ Benchmark("df.resample('1s', how=np.max)", setup) + +#---------------------------------------------------------------------- +# DatetimeConverter + +setup = common_setup + """ +from pandas.tseries.converter import DatetimeConverter +""" + +datetimeindex_converter = \ + Benchmark('DatetimeConverter.convert(rng, None, None)', + setup, start_date=datetime(2013, 1, 1)) +
closes #6636 This fixes the performance bottleneck described in #6636 by using the vectorized epoch2num for DataIndex arrays. The included test ensures that the DatetimeConverter produces the same results as the Matplotlib's date2num for individual Timestamp objects as well as DatetimeIndex array objects.
https://api.github.com/repos/pandas-dev/pandas/pulls/6650
2014-03-15T21:17:46Z
2014-03-17T19:48:28Z
2014-03-17T19:48:28Z
2014-06-19T14:26:36Z
PLAT: platform sorting issue surfaced with time_grouper
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 7bf20d71cb301..683c07b70d0f2 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -169,7 +169,7 @@ def __new__(cls, *args, **kwargs): cls = TimeGrouper return super(Grouper, cls).__new__(cls) - def __init__(self, key=None, level=None, freq=None, axis=None, sort=True): + def __init__(self, key=None, level=None, freq=None, axis=None, sort=False): self.key=key self.level=level self.freq=freq @@ -201,7 +201,7 @@ def get_grouper(self, obj): self.set_grouper(obj) return self.binner, self.grouper, self.obj - def set_grouper(self, obj): + def set_grouper(self, obj, sort=False): """ given an object and the specifcations, setup the internal grouper for this particular specification @@ -243,7 +243,7 @@ def set_grouper(self, obj): raise ValueError("The grouper level {0} is not valid".format(level)) # possibly sort - if not ax.is_monotonic: + if (self.sort or sort) and not ax.is_monotonic: indexer = self.indexer = ax.argsort(kind='quicksort') ax = ax.take(indexer) obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False) @@ -1608,6 +1608,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self.grouper = _convert_grouper(index, grouper) self.index = index self.sort = sort + self.obj = obj # right place for this? 
if isinstance(grouper, (Series, Index)) and name is None: @@ -1684,7 +1685,8 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, elif isinstance(self.grouper, Grouper): # get the new grouper - grouper = self.grouper.get_binner_for_grouping(obj) + grouper = self.grouper.get_binner_for_grouping(self.obj) + self.obj = self.grouper.obj self.grouper = grouper if self.name is None: self.name = grouper.name diff --git a/pandas/core/index.py b/pandas/core/index.py index 0343475be377e..3eab4d0339082 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -883,7 +883,10 @@ def argsort(self, *args, **kwargs): """ See docstring for ndarray.argsort """ - return self.view(np.ndarray).argsort(*args, **kwargs) + result = self.asi8 + if result is None: + result = self.view(np.ndarray) + return result.argsort(*args, **kwargs) def __add__(self, other): if isinstance(other, Index): diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index 8ab7063eada17..b29f67b40894b 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -70,13 +70,13 @@ def __init__(self, freq='Min', closed=None, label=None, how='mean', self.limit = limit self.base = base - # by definition we always sort + # always sort time groupers kwargs['sort'] = True super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs) def resample(self, obj): - self.set_grouper(obj) + self.set_grouper(obj, sort=True) ax = self.grouper if isinstance(ax, DatetimeIndex):
https://api.github.com/repos/pandas-dev/pandas/pulls/6649
2014-03-15T18:26:46Z
2014-03-17T12:25:22Z
2014-03-17T12:25:22Z
2014-07-22T18:41:02Z
CLN: Fix many indentation errors found in #6643.
diff --git a/doc/make.py b/doc/make.py index 8a92654d1378b..4367ac91396bb 100755 --- a/doc/make.py +++ b/doc/make.py @@ -77,10 +77,10 @@ def upload_prev(ver, doc_root='./'): raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root)) def build_pandas(): - os.chdir('..') - os.system('python setup.py clean') - os.system('python setup.py build_ext --inplace') - os.chdir('doc') + os.chdir('..') + os.system('python setup.py clean') + os.system('python setup.py build_ext --inplace') + os.chdir('doc') def build_prev(ver): if os.system('git checkout v%s' % ver) != 1: diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py index e22b6218a2227..5489893df06b9 100644 --- a/pandas/computation/tests/test_eval.py +++ b/pandas/computation/tests/test_eval.py @@ -819,7 +819,7 @@ def testit(r_idx_type, c_idx_type, index_name): with warnings.catch_warnings(record=True): warnings.simplefilter('always', RuntimeWarning) for r_idx_type, c_idx_type, index_name in args: - testit(r_idx_type, c_idx_type, index_name) + testit(r_idx_type, c_idx_type, index_name) def test_basic_frame_series_alignment(self): for engine, parser in ENGINES_PARSERS: diff --git a/pandas/core/format.py b/pandas/core/format.py index c2f439877ca00..b11b2e7270271 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -539,18 +539,18 @@ def write(buf, frame, column_format, strcols, longtable=False): buf.write('\\bottomrule\n') buf.write('\\endlastfoot\n') if self.escape: - crow = [(x.replace('\\', '\\textbackslash') # escape backslashes first - .replace('_', '\\_') - .replace('%', '\\%') - .replace('$', '\\$') - .replace('#', '\\#') - .replace('{', '\\{') - .replace('}', '\\}') - .replace('~', '\\textasciitilde') - .replace('^', '\\textasciicircum') - .replace('&', '\\&') if x else '{}') for x in row] + crow = [(x.replace('\\', '\\textbackslash') # escape backslashes first + .replace('_', '\\_') + .replace('%', '\\%') + .replace('$', '\\$') + .replace('#', '\\#') + 
.replace('{', '\\{') + .replace('}', '\\}') + .replace('~', '\\textasciitilde') + .replace('^', '\\textasciicircum') + .replace('&', '\\&') if x else '{}') for x in row] else: - crow = [x if x else '{}' for x in row] + crow = [x if x else '{}' for x in row] buf.write(' & '.join(crow)) buf.write(' \\\\\n') @@ -2104,7 +2104,7 @@ def detect_console_encoding(): # when all else fails. this will usually be "ascii" if not encoding or 'ascii' in encoding.lower(): - encoding = sys.getdefaultencoding() + encoding = sys.getdefaultencoding() # GH3360, save the reported defencoding at import time # MPL backends may change it. Make available for debugging. diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f486d48b58651..cf9ff8abff3ef 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3598,10 +3598,10 @@ def pretty_name(x): return '%.1f%%' % x def describe_numeric_1d(series, percentiles): - return ([series.count(), series.mean(), series.std(), - series.min()] + - [series.quantile(x) for x in percentiles] + - [series.max()]) + return ([series.count(), series.mean(), series.std(), + series.min()] + + [series.quantile(x) for x in percentiles] + + [series.max()]) def describe_categorical_1d(data): names = ['count', 'unique'] diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index dc8b7f3bccc2a..4d3927428cef2 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2218,9 +2218,9 @@ def transform(self, func, *args, **kwargs): # may need to astype try: - common_type = np.common_type(np.array(res), result) - if common_type != result.dtype: - result = result.astype(common_type) + common_type = np.common_type(np.array(res), result) + if common_type != result.dtype: + result = result.astype(common_type) except: pass diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index c4550a18492cb..bfff85ac4712c 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1131,13 +1131,13 @@ def _getitem_axis(self, key, 
axis=0, validate_iterable=False): raise NotImplementedError() def _getbool_axis(self, key, axis=0): - labels = self.obj._get_axis(axis) - key = _check_bool_indexer(labels, key) - inds, = key.nonzero() - try: - return self.obj.take(inds, axis=axis, convert=False) - except Exception as detail: - raise self._exception(detail) + labels = self.obj._get_axis(axis) + key = _check_bool_indexer(labels, key) + inds, = key.nonzero() + try: + return self.obj.take(inds, axis=axis, convert=False) + except Exception as detail: + raise self._exception(detail) def _get_slice_axis(self, slice_obj, axis=0): """ this is pretty simple as we just have to deal with labels """ @@ -1193,7 +1193,7 @@ def _has_valid_type(self, key, axis): ) elif com._is_bool_indexer(key): - return True + return True elif _is_list_like(key): diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 105c0c3985cc1..75ec53c95869a 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -824,7 +824,7 @@ def eval(self, func, other, raise_on_error=True, try_cast=False): is_transposed = False if hasattr(other, 'ndim') and hasattr(values, 'ndim'): if values.ndim != other.ndim: - is_transposed = True + is_transposed = True else: if values.shape == other.shape[::-1]: is_transposed = True @@ -2981,7 +2981,7 @@ def _is_indexed_like(self, other): def equals(self, other): self_axes, other_axes = self.axes, other.axes if len(self_axes) != len(other_axes): - return False + return False if not all (ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): return False self._consolidate_inplace() diff --git a/pandas/core/series.py b/pandas/core/series.py index b66b74a011c4d..bdad1f9e5561b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -672,7 +672,7 @@ def _set_with(self, key, value): else: return self._set_values(key, value) elif key_type == 'boolean': - self._set_values(key.astype(np.bool_), value) + self._set_values(key.astype(np.bool_), value) else: self._set_labels(key, 
value) diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 6372d83f50051..67107ee20b336 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -291,10 +291,10 @@ def _parse_excel(self, sheetname=0, header=0, skiprows=None, skip_footer=0, year = (value.timetuple())[0:3] if ((not epoch1904 and year == (1899, 12, 31)) or (epoch1904 and year == (1904, 1, 1))): - value = datetime.time(value.hour, - value.minute, - value.second, - value.microsecond) + value = datetime.time(value.hour, + value.minute, + value.second, + value.microsecond) else: # Use the xlrd <= 0.9.2 date handling. dt = xldate.xldate_as_tuple(value, epoch1904) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 58338a47d9465..a52be0ee6a82e 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -75,7 +75,7 @@ def test_isnull(): # series for s in [tm.makeFloatSeries(),tm.makeStringSeries(), tm.makeObjectSeries(),tm.makeTimeSeries(),tm.makePeriodSeries()]: - assert(isinstance(isnull(s), Series)) + assert(isinstance(isnull(s), Series)) # frame for df in [tm.makeTimeDataFrame(),tm.makePeriodFrame(),tm.makeMixedDataFrame()]: diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 4ebf3dd99a105..ea3dafa07715b 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -5563,63 +5563,63 @@ def test_to_csv_from_csv(self): pname = '__tmp_to_csv_from_csv__' with ensure_clean(pname) as path: - self.frame['A'][:5] = nan - - self.frame.to_csv(path) - self.frame.to_csv(path, columns=['A', 'B']) - self.frame.to_csv(path, header=False) - self.frame.to_csv(path, index=False) - - # test roundtrip - self.tsframe.to_csv(path) - recons = DataFrame.from_csv(path) - - assert_frame_equal(self.tsframe, recons) - - self.tsframe.to_csv(path, index_label='index') - recons = DataFrame.from_csv(path, index_col=None) - assert(len(recons.columns) == len(self.tsframe.columns) + 1) - - # no index - self.tsframe.to_csv(path, index=False) - recons 
= DataFrame.from_csv(path, index_col=None) - assert_almost_equal(self.tsframe.values, recons.values) - - # corner case - dm = DataFrame({'s1': Series(lrange(3), lrange(3)), - 's2': Series(lrange(2), lrange(2))}) - dm.to_csv(path) - recons = DataFrame.from_csv(path) - assert_frame_equal(dm, recons) + self.frame['A'][:5] = nan + + self.frame.to_csv(path) + self.frame.to_csv(path, columns=['A', 'B']) + self.frame.to_csv(path, header=False) + self.frame.to_csv(path, index=False) + + # test roundtrip + self.tsframe.to_csv(path) + recons = DataFrame.from_csv(path) + + assert_frame_equal(self.tsframe, recons) + + self.tsframe.to_csv(path, index_label='index') + recons = DataFrame.from_csv(path, index_col=None) + assert(len(recons.columns) == len(self.tsframe.columns) + 1) + + # no index + self.tsframe.to_csv(path, index=False) + recons = DataFrame.from_csv(path, index_col=None) + assert_almost_equal(self.tsframe.values, recons.values) + + # corner case + dm = DataFrame({'s1': Series(lrange(3), lrange(3)), + 's2': Series(lrange(2), lrange(2))}) + dm.to_csv(path) + recons = DataFrame.from_csv(path) + assert_frame_equal(dm, recons) with ensure_clean(pname) as path: - # duplicate index - df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'], - columns=['x', 'y', 'z']) - df.to_csv(path) - result = DataFrame.from_csv(path) - assert_frame_equal(result, df) - - midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)]) - df = DataFrame(np.random.randn(3, 3), index=midx, - columns=['x', 'y', 'z']) - df.to_csv(path) - result = DataFrame.from_csv(path, index_col=[0, 1, 2], - parse_dates=False) - assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ? 
- - # column aliases - col_aliases = Index(['AA', 'X', 'Y', 'Z']) - self.frame2.to_csv(path, header=col_aliases) - rs = DataFrame.from_csv(path) - xp = self.frame2.copy() - xp.columns = col_aliases - - assert_frame_equal(xp, rs) - - self.assertRaises(ValueError, self.frame2.to_csv, path, - header=['AA', 'X']) + # duplicate index + df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'], + columns=['x', 'y', 'z']) + df.to_csv(path) + result = DataFrame.from_csv(path) + assert_frame_equal(result, df) + + midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)]) + df = DataFrame(np.random.randn(3, 3), index=midx, + columns=['x', 'y', 'z']) + df.to_csv(path) + result = DataFrame.from_csv(path, index_col=[0, 1, 2], + parse_dates=False) + assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ? + + # column aliases + col_aliases = Index(['AA', 'X', 'Y', 'Z']) + self.frame2.to_csv(path, header=col_aliases) + rs = DataFrame.from_csv(path) + xp = self.frame2.copy() + xp.columns = col_aliases + + assert_frame_equal(xp, rs) + + self.assertRaises(ValueError, self.frame2.to_csv, path, + header=['AA', 'X']) with ensure_clean(pname) as path: import pandas as pd @@ -5682,12 +5682,12 @@ def _check_df(df,cols=None): rs_c.columns = df.columns.take(indexer) for c in cols: - obj_df = df[c] - obj_rs = rs_c[c] - if isinstance(obj_df,Series): - assert_series_equal(obj_df,obj_rs) - else: - assert_frame_equal(obj_df,obj_rs,check_names=False) + obj_df = df[c] + obj_rs = rs_c[c] + if isinstance(obj_df,Series): + assert_series_equal(obj_df,obj_rs) + else: + assert_frame_equal(obj_df,obj_rs,check_names=False) # wrote in the same order else: @@ -5713,80 +5713,80 @@ def test_to_csv_moar(self): def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None, dupe_col=False): - kwargs = dict(parse_dates=False) - if cnlvl: - if rnlvl is not None: - kwargs['index_col'] = lrange(rnlvl) - kwargs['header'] = 
lrange(cnlvl) - with ensure_clean(path) as path: - df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False) - recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs) - else: - kwargs['header'] = 0 - with ensure_clean(path) as path: - df.to_csv(path,encoding='utf8',chunksize=chunksize) - recons = DataFrame.from_csv(path,**kwargs) - - def _to_uni(x): - if not isinstance(x, compat.text_type): - return x.decode('utf8') - return x - if dupe_col: - # read_Csv disambiguates the columns by - # labeling them dupe.1,dupe.2, etc'. monkey patch columns - recons.columns = df.columns - if rnlvl and not cnlvl: - delta_lvl = [recons.icol(i).values for i in range(rnlvl-1)] - ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl) - recons.index = ix - recons = recons.iloc[:,rnlvl-1:] - - type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O') - if r_dtype: - if r_dtype == 'u': # unicode - r_dtype='O' - recons.index = np.array(lmap(_to_uni,recons.index), - dtype=r_dtype) - df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype) - if r_dtype == 'dt': # unicode - r_dtype='O' - recons.index = np.array(lmap(Timestamp,recons.index), - dtype=r_dtype) - df.index = np.array(lmap(Timestamp,df.index),dtype=r_dtype) - elif r_dtype == 'p': - r_dtype='O' - recons.index = np.array(list(map(Timestamp, - recons.index.to_datetime())), - dtype=r_dtype) - df.index = np.array(list(map(Timestamp, - df.index.to_datetime())), + kwargs = dict(parse_dates=False) + if cnlvl: + if rnlvl is not None: + kwargs['index_col'] = lrange(rnlvl) + kwargs['header'] = lrange(cnlvl) + with ensure_clean(path) as path: + df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False) + recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs) + else: + kwargs['header'] = 0 + with ensure_clean(path) as path: + df.to_csv(path,encoding='utf8',chunksize=chunksize) + recons = DataFrame.from_csv(path,**kwargs) + + def _to_uni(x): + if not isinstance(x, compat.text_type): + return 
x.decode('utf8') + return x + if dupe_col: + # read_Csv disambiguates the columns by + # labeling them dupe.1,dupe.2, etc'. monkey patch columns + recons.columns = df.columns + if rnlvl and not cnlvl: + delta_lvl = [recons.icol(i).values for i in range(rnlvl-1)] + ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl) + recons.index = ix + recons = recons.iloc[:,rnlvl-1:] + + type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O') + if r_dtype: + if r_dtype == 'u': # unicode + r_dtype='O' + recons.index = np.array(lmap(_to_uni,recons.index), dtype=r_dtype) - else: - r_dtype= type_map.get(r_dtype) - recons.index = np.array(recons.index,dtype=r_dtype ) - df.index = np.array(df.index,dtype=r_dtype ) - if c_dtype: - if c_dtype == 'u': - c_dtype='O' - recons.columns = np.array(lmap(_to_uni,recons.columns), - dtype=c_dtype) - df.columns = np.array(lmap(_to_uni,df.columns),dtype=c_dtype ) - elif c_dtype == 'dt': - c_dtype='O' - recons.columns = np.array(lmap(Timestamp,recons.columns), + df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype) + if r_dtype == 'dt': # unicode + r_dtype='O' + recons.index = np.array(lmap(Timestamp,recons.index), + dtype=r_dtype) + df.index = np.array(lmap(Timestamp,df.index),dtype=r_dtype) + elif r_dtype == 'p': + r_dtype='O' + recons.index = np.array(list(map(Timestamp, + recons.index.to_datetime())), + dtype=r_dtype) + df.index = np.array(list(map(Timestamp, + df.index.to_datetime())), + dtype=r_dtype) + else: + r_dtype= type_map.get(r_dtype) + recons.index = np.array(recons.index,dtype=r_dtype ) + df.index = np.array(df.index,dtype=r_dtype ) + if c_dtype: + if c_dtype == 'u': + c_dtype='O' + recons.columns = np.array(lmap(_to_uni,recons.columns), + dtype=c_dtype) + df.columns = np.array(lmap(_to_uni,df.columns),dtype=c_dtype ) + elif c_dtype == 'dt': + c_dtype='O' + recons.columns = np.array(lmap(Timestamp,recons.columns), dtype=c_dtype ) - df.columns = np.array(lmap(Timestamp,df.columns),dtype=c_dtype) - elif c_dtype == 'p': - 
c_dtype='O' - recons.columns = np.array(lmap(Timestamp,recons.columns.to_datetime()), - dtype=c_dtype) - df.columns = np.array(lmap(Timestamp,df.columns.to_datetime()),dtype=c_dtype ) - else: - c_dtype= type_map.get(c_dtype) - recons.columns = np.array(recons.columns,dtype=c_dtype ) - df.columns = np.array(df.columns,dtype=c_dtype ) + df.columns = np.array(lmap(Timestamp,df.columns),dtype=c_dtype) + elif c_dtype == 'p': + c_dtype='O' + recons.columns = np.array(lmap(Timestamp,recons.columns.to_datetime()), + dtype=c_dtype) + df.columns = np.array(lmap(Timestamp,df.columns.to_datetime()),dtype=c_dtype ) + else: + c_dtype= type_map.get(c_dtype) + recons.columns = np.array(recons.columns,dtype=c_dtype ) + df.columns = np.array(df.columns,dtype=c_dtype ) - assert_frame_equal(df,recons,check_names=False,check_less_precise=True) + assert_frame_equal(df,recons,check_names=False,check_less_precise=True) N = 100 chunksize=1000 @@ -5794,16 +5794,16 @@ def _to_uni(x): # GH3437 from pandas import NaT def make_dtnat_arr(n,nnat=None): - if nnat is None: - nnat= int(n*0.1) # 10% - s=list(date_range('2000',freq='5min',periods=n)) - if nnat: - for i in np.random.randint(0,len(s),nnat): - s[i] = NaT - i = np.random.randint(100) - s[-i] = NaT - s[i] = NaT - return s + if nnat is None: + nnat= int(n*0.1) # 10% + s=list(date_range('2000',freq='5min',periods=n)) + if nnat: + for i in np.random.randint(0,len(s),nnat): + s[i] = NaT + i = np.random.randint(100) + s[-i] = NaT + s[i] = NaT + return s # N=35000 s1=make_dtnat_arr(chunksize+5) @@ -5879,11 +5879,11 @@ def test_to_csv_from_csv_w_some_infs(self): self.frame['H'] = self.frame.index.map(f) with ensure_clean() as path: - self.frame.to_csv(path) - recons = DataFrame.from_csv(path) - - assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name - assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False) + self.frame.to_csv(path) + recons = DataFrame.from_csv(path) + + 
assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name + assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False) def test_to_csv_from_csv_w_all_infs(self): @@ -5938,37 +5938,37 @@ def test_to_csv_multiindex(self): with ensure_clean(pname) as path: - frame.to_csv(path, header=False) - frame.to_csv(path, columns=['A', 'B']) - - # round trip - frame.to_csv(path) - df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False) - - assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name - self.assertEqual(frame.index.names, df.index.names) - self.frame.index = old_index # needed if setUP becomes a classmethod - - # try multiindex with dates - tsframe = self.tsframe - old_index = tsframe.index - new_index = [old_index, np.arange(len(old_index))] - tsframe.index = MultiIndex.from_arrays(new_index) - - tsframe.to_csv(path, index_label=['time', 'foo']) - recons = DataFrame.from_csv(path, index_col=[0, 1]) - assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name - - # do not load index - tsframe.to_csv(path) - recons = DataFrame.from_csv(path, index_col=None) - np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2) - - # no index - tsframe.to_csv(path, index=False) - recons = DataFrame.from_csv(path, index_col=None) - assert_almost_equal(recons.values, self.tsframe.values) - self.tsframe.index = old_index # needed if setUP becomes classmethod + frame.to_csv(path, header=False) + frame.to_csv(path, columns=['A', 'B']) + + # round trip + frame.to_csv(path) + df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False) + + assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name + self.assertEqual(frame.index.names, df.index.names) + self.frame.index = old_index # needed if setUP becomes a classmethod + + # try multiindex with dates + tsframe = self.tsframe + old_index = tsframe.index + new_index = [old_index, 
np.arange(len(old_index))] + tsframe.index = MultiIndex.from_arrays(new_index) + + tsframe.to_csv(path, index_label=['time', 'foo']) + recons = DataFrame.from_csv(path, index_col=[0, 1]) + assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name + + # do not load index + tsframe.to_csv(path) + recons = DataFrame.from_csv(path, index_col=None) + np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2) + + # no index + tsframe.to_csv(path, index=False) + recons = DataFrame.from_csv(path, index_col=None) + assert_almost_equal(recons.values, self.tsframe.values) + self.tsframe.index = old_index # needed if setUP becomes classmethod with ensure_clean(pname) as path: # GH3571, GH1651, GH3141 @@ -6077,8 +6077,8 @@ def test_to_csv_float32_nanrep(self): df.to_csv(path, na_rep=999) with open(path) as f: - lines = f.readlines() - self.assertEqual(lines[1].split(',')[2], '999') + lines = f.readlines() + self.assertEqual(lines[1].split(',')[2], '999') def test_to_csv_withcommas(self): @@ -6149,7 +6149,7 @@ def test_to_csv_dups_cols(self): # date cols for i in ['0.4','1.4','2.4']: - result[i] = to_datetime(result[i]) + result[i] = to_datetime(result[i]) result.columns = df.columns assert_frame_equal(result,df) @@ -12079,7 +12079,7 @@ def test_columns_with_dups(self): # testing iget for i in range(len(df.columns)): - df.iloc[:,i] + df.iloc[:,i] # dup columns across dtype GH 2079/2194 vals = [[1, -1, 2.], [2, -2, 3.]] diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 5f07acf25582f..3cf4cb8bc5809 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -413,8 +413,8 @@ def test_nonzero_single_element(self): # single non-bool are an error for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]: - self.assertRaises(ValueError, lambda : bool(s)) - self.assertRaises(ValueError, lambda : s.bool()) + self.assertRaises(ValueError, lambda : bool(s)) + self.assertRaises(ValueError, 
lambda : s.bool()) def test_metadata_propagation_indiv(self): # check that the metadata matches up on the resulting ops diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py index b9939976fded8..80ac97ee60617 100644 --- a/pandas/tseries/converter.py +++ b/pandas/tseries/converter.py @@ -513,7 +513,7 @@ def _daily_finder(vmin, vmax, freq): def first_label(label_flags): if (label_flags[0] == 0) and (label_flags.size > 1) and \ ((vmin_orig % 1) > 0.0): - return label_flags[1] + return label_flags[1] else: return label_flags[0] diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 380116fc5aab5..15f11954fd022 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -94,7 +94,7 @@ def assert_numpy_array_equal(self, np_array, assert_equal): If the expected array includes `np.nan` use `assert_numpy_array_equivalent(...)`. """ if np.array_equal(np_array, assert_equal): - return + return raise AssertionError('{0} is not equal to {1}.'.format(np_array, assert_equal)) def assert_numpy_array_equivalent(self, np_array, assert_equal): @@ -108,7 +108,7 @@ def assert_numpy_array_equivalent(self, np_array, assert_equal): function. """ if array_equivalent(np_array, assert_equal): - return + return raise AssertionError('{0} is not equivalent to {1}.'.format(np_array, assert_equal)) def assertIs(self, first, second, msg=''):
Changes should only affect whitespace. closes #6643
https://api.github.com/repos/pandas-dev/pandas/pulls/6646
2014-03-15T15:26:21Z
2014-06-17T21:54:57Z
2014-06-17T21:54:57Z
2014-06-17T21:55:10Z
SQL: add index_label keyword to to_sql
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6c57a9ce5beaa..df2fbcbe32c8f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -908,7 +908,8 @@ def to_msgpack(self, path_or_buf=None, **kwargs): from pandas.io import packers return packers.to_msgpack(path_or_buf, self, **kwargs) - def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True): + def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True, + index_label=None): """ Write records stored in a DataFrame to a SQL database. @@ -928,12 +929,17 @@ def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True): - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True - Write DataFrame index as a column + Write DataFrame index as a column. + index_label : string or sequence, default None + Column label for index column(s). If None is given (default) and + `index` is True, then the index names are used. + A sequence should be given if the DataFrame uses MultiIndex. """ from pandas.io import sql sql.to_sql( - self, name, con, flavor=flavor, if_exists=if_exists, index=index) + self, name, con, flavor=flavor, if_exists=if_exists, index=index, + index_label=index_label) def to_pickle(self, path): """ diff --git a/pandas/io/sql.py b/pandas/io/sql.py index f17820b06ce5e..fa89cf488125a 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -229,7 +229,8 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, parse_dates=parse_dates) -def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True): +def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, + index_label=None): """ Write records stored in a DataFrame to a SQL database. @@ -251,6 +252,11 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True): - append: If table exists, insert data. 
Create if does not exist. index : boolean, default True Write DataFrame index as a column + index_label : string or sequence, default None + Column label for index column(s). If None is given (default) and + `index` is True, then the index names are used. + A sequence should be given if the DataFrame uses MultiIndex. + """ pandas_sql = pandasSQL_builder(con, flavor=flavor) @@ -259,7 +265,8 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True): elif not isinstance(frame, DataFrame): raise NotImplementedError - pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index) + pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index, + index_label=index_label) def has_table(table_name, con, meta=None, flavor='sqlite'): @@ -377,12 +384,12 @@ class PandasSQLTable(PandasObject): """ # TODO: support for multiIndex def __init__(self, name, pandas_sql_engine, frame=None, index=True, - if_exists='fail', prefix='pandas'): + if_exists='fail', prefix='pandas', index_label=None): self.name = name self.pd_sql = pandas_sql_engine self.prefix = prefix self.frame = frame - self.index = self._index_name(index) + self.index = self._index_name(index, index_label) if frame is not None: # We want to write a frame @@ -473,9 +480,11 @@ def read(self, coerce_float=True, parse_dates=None, columns=None): return self.frame - def _index_name(self, index): + def _index_name(self, index, index_label): if index is True: - if self.frame.index.name is not None: + if index_label is not None: + return _safe_col_name(index_label) + elif self.frame.index.name is not None: return _safe_col_name(self.frame.index.name) else: return self.prefix + '_index' @@ -652,9 +661,11 @@ def read_sql(self, sql, index_col=None, coerce_float=True, return data_frame - def to_sql(self, frame, name, if_exists='fail', index=True): + def to_sql(self, frame, name, if_exists='fail', index=True, + index_label=None): table = PandasSQLTable( - name, self, frame=frame, index=index, 
if_exists=if_exists) + name, self, frame=frame, index=index, if_exists=if_exists, + index_label=index_label) table.insert() @property @@ -882,7 +893,8 @@ def _fetchall_as_list(self, cur): result = list(result) return result - def to_sql(self, frame, name, if_exists='fail', index=True): + def to_sql(self, frame, name, if_exists='fail', index=True, + index_label=None): """ Write records stored in a DataFrame to a SQL database. @@ -895,6 +907,7 @@ def to_sql(self, frame, name, if_exists='fail', index=True): fail: If table exists, do nothing. replace: If table exists, drop it, recreate it, and insert data. append: If table exists, insert data. Create if does not exist. + index_label : ignored (only used in sqlalchemy mode) """ table = PandasSQLTableLegacy( name, self, frame=frame, index=index, if_exists=if_exists) diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 8e045db0315cb..2f9323e50c9e2 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -255,7 +255,7 @@ def _tquery(self): tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) -class TestSQLApi(PandasSQLTest): +class _TestSQLApi(PandasSQLTest): """Test the public API as it would be used directly, including legacy names @@ -269,12 +269,6 @@ class TestSQLApi(PandasSQLTest): """ flavor = 'sqlite' - def connect(self): - if SQLALCHEMY_INSTALLED: - return sqlalchemy.create_engine('sqlite:///:memory:') - else: - return sqlite3.connect(':memory:') - def setUp(self): self.conn = self.connect() self._load_iris_data() @@ -436,6 +430,56 @@ def test_date_and_index(self): issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") +class TestSQLApi(_TestSQLApi): + """Test the public API as it would be used directly + """ + flavor = 'sqlite' + + def connect(self): + if SQLALCHEMY_INSTALLED: + return sqlalchemy.create_engine('sqlite:///:memory:') + else: + raise nose.SkipTest('SQLAlchemy not installed') + + def 
test_to_sql_index_label(self): + temp_frame = DataFrame({'col1': range(4)}) + + # no index name, defaults to 'pandas_index' + sql.to_sql(temp_frame, 'test_index_label', self.conn) + frame = sql.read_table('test_index_label', self.conn) + self.assertEqual(frame.columns[0], 'pandas_index') + + # specifying index_label + sql.to_sql(temp_frame, 'test_index_label', self.conn, + if_exists='replace', index_label='other_label') + frame = sql.read_table('test_index_label', self.conn) + self.assertEqual(frame.columns[0], 'other_label', + "Specified index_label not written to database") + + # using the index name + temp_frame.index.name = 'index' + sql.to_sql(temp_frame, 'test_index_label', self.conn, + if_exists='replace') + frame = sql.read_table('test_index_label', self.conn) + self.assertEqual(frame.columns[0], 'index', + "Index name not written to database") + + # has index name, but specifying index_label + sql.to_sql(temp_frame, 'test_index_label', self.conn, + if_exists='replace', index_label='other_label') + frame = sql.read_table('test_index_label', self.conn) + self.assertEqual(frame.columns[0], 'other_label', + "Specified index_label not written to database") + + +class TestSQLLegacyApi(_TestSQLApi): + """Test the public legacy API + """ + flavor = 'sqlite' + + def connect(self): + return sqlite3.connect(':memory:') + class _TestSQLAlchemy(PandasSQLTest): """
Further work on #6292. While looking at possible multi-index support, I thought of first adding this: - added ability to specify the used column name for the index column in `to_sql` (analoguous to `to_csv`). Good idea? - I only did it for the new sqlalchemy function, not the legacy one. Only problem is that it starts from the same function call, so all keyword arguments have also to be added to the legacy `to_sql` (https://github.com/jorisvandenbossche/pandas/compare/sql-multiindex?expand=1#diff-b41f9fd042c423682f8e4c4d808dbe64R891) without using it. Is there a better approach? Should I warn that this is ignored if the user specifies this? - added tests for it to do: - [x] should also change this in generic.py - [ ] check for name conflicts (warn and suggest to use index_label?) @mangecoeur
https://api.github.com/repos/pandas-dev/pandas/pulls/6642
2014-03-14T23:01:01Z
2014-03-28T20:40:51Z
2014-03-28T20:40:51Z
2014-06-30T13:49:37Z
API: A tuple passed to DataFame.sort_index will be interpreted as the levels of the index (GH4370)
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index c2642501791e7..159cd05194300 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1287,9 +1287,18 @@ Some other sorting notes / nuances: * ``Series.sort`` sorts a Series by value in-place. This is to provide compatibility with NumPy methods which expect the ``ndarray.sort`` behavior. - * ``DataFrame.sort`` takes a ``column`` argument instead of ``by``. This - method will likely be deprecated in a future release in favor of just using - ``sort_index``. + +Sorting by a multi-index column +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You must be explicit about sorting when the column is a multi-index, and fully specify +all levels to ``by``. + +.. ipython:: python + + df1.columns = MultiIndex.from_tuples([('a','one'),('a','two'),('b','three')]) + df1.sort_index(by=('a','two')) + Copying ------- diff --git a/doc/source/release.rst b/doc/source/release.rst index 1dbce9f98cafa..257d17bbe321b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -125,6 +125,17 @@ API Changes ``DataFrame.stack`` operations where the name of the column index is used as the name of the inserted column containing the pivoted data. +- A tuple passed to ``DataFame.sort_index`` will be interpreted as the levels of + the index, rather than requiring a list of tuple (:issue:`4370`) + +Deprecations +~~~~~~~~~~~~ + +Prior Version Deprecations/Changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) + Experimental Features ~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 932ed4e1672b7..37e7f5b32ea84 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -276,7 +276,9 @@ You can use a right-hand-side of an alignable object as well. 
Prior Version Deprecations/Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -There are no announced changes in 0.13.1 or prior that are taking effect as of 0.14.0 +Therse are prior version deprecations that are taking effect as of 0.14.0. + +- Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) Deprecations ~~~~~~~~~~~~ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8c5243461b7b9..8a72b0d7c4493 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2530,7 +2530,7 @@ def _m8_to_i8(x): #---------------------------------------------------------------------- # Sorting - def sort(self, columns=None, column=None, axis=0, ascending=True, + def sort(self, columns=None, axis=0, ascending=True, inplace=False): """ Sort DataFrame either by labels (along either axis) or by the values in @@ -2539,8 +2539,9 @@ def sort(self, columns=None, column=None, axis=0, ascending=True, Parameters ---------- columns : object - Column name(s) in frame. Accepts a column name or a list or tuple - for a nested sort. + Column name(s) in frame. Accepts a column name or a list + for a nested sort. A tuple will be interpreted as the + levels of a multi-index. ascending : boolean or list, default True Sort ascending vs. descending. Specify list for multiple sort orders @@ -2557,9 +2558,6 @@ def sort(self, columns=None, column=None, axis=0, ascending=True, ------- sorted : DataFrame """ - if column is not None: # pragma: no cover - warnings.warn("column is deprecated, use columns", FutureWarning) - columns = column return self.sort_index(by=columns, axis=axis, ascending=ascending, inplace=inplace) @@ -2574,8 +2572,9 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False, axis : {0, 1} Sort index/rows versus columns by : object - Column name(s) in frame. Accepts a column name or a list or tuple - for a nested sort. + Column name(s) in frame. Accepts a column name or a list + for a nested sort. 
A tuple will be interpreted as the + levels of a multi-index. ascending : boolean or list, default True Sort ascending vs. descending. Specify list for multiple sort orders @@ -2602,7 +2601,7 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False, if axis != 0: raise ValueError('When sorting by column, axis must be 0 ' '(rows)') - if not isinstance(by, (tuple, list)): + if not isinstance(by, list): by = [by] if com._is_sequence(ascending) and len(by) != len(ascending): raise ValueError('Length of ascending (%d) != length of by' @@ -2629,6 +2628,13 @@ def trans(v): by = by[0] k = self[by].values if k.ndim == 2: + + # try to be helpful + if isinstance(self.columns, MultiIndex): + raise ValueError('Cannot sort by column %s in a multi-index' + ' you need to explicity provide all the levels' + % str(by)) + raise ValueError('Cannot sort by duplicate column %s' % str(by)) if isinstance(ascending, (tuple, list)): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index ed88a355cf7a9..8a4207da76c52 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -9797,6 +9797,17 @@ def test_sort_index_duplicates(self): # multi-column 'by' is separate codepath df.sort_index(by=['a', 'b']) + # with multi-index + # GH4370 + df = DataFrame(np.random.randn(4,2),columns=MultiIndex.from_tuples([('a',0),('a',1)])) + with assertRaisesRegexp(ValueError, 'levels'): + df.sort_index(by='a') + + # convert tuples to a list of tuples + expected = df.sort_index(by=[('a',1)]) + result = df.sort_index(by=('a',1)) + assert_frame_equal(result, expected) + def test_sort_datetimes(self): # GH 3461, argsort / lexsort differences for a datetime column
closes #4370 ``` In [3]: df = DataFrame({'one':[2,1,1,1],'two':[1,3,2,4],'three':[5,4,3,2]}) In [7]: df.columns = MultiIndex.from_tuples([('a','one'),('a','two'),('b','three')]) In [8]: df Out[8]: a b one two three 0 2 5 1 1 1 4 3 2 1 3 2 3 1 2 4 [4 rows x 3 columns] ``` These are now equivalent. Used to be able to do only the 2nd. ``` In [9]: df.sort_index(by=('a','two')) Out[9]: a b one two three 3 1 2 4 2 1 3 2 1 1 4 3 0 2 5 1 [4 rows x 3 columns] In [10]: df.sort_index(by=[('a','two')]) Out[10]: a b one two three 3 1 2 4 2 1 3 2 1 1 4 3 0 2 5 1 [4 rows x 3 columns] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6639
2014-03-14T21:38:21Z
2014-03-15T14:34:37Z
2014-03-15T14:34:37Z
2014-06-24T17:52:10Z
BUG: Bug in fillna with limit and value specified
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6f83bea6a6578..4890f22e98468 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -232,7 +232,7 @@ Bug Fixes - Bug in sql writing with mixed dtypes possibly leading to data loss (:issue:`6509`) - Bug in popping from a Series (:issue:`6600`) - Bug in ``iloc`` indexing when positional indexer matched Int64Index of corresponding axis no reordering happened (:issue:`6612`) - +- Bug in ``fillna`` with ``limit`` and ``value`` specified pandas 0.13.1 ------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 678578a19e221..0a8f57c581d92 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2162,7 +2162,9 @@ def fillna(self, value=None, method=None, axis=0, inplace=False, from pandas import Series value = Series(value) - new_data = self._data.fillna(value=value, inplace=inplace, + new_data = self._data.fillna(value=value, + limit=limit, + inplace=inplace, downcast=downcast) elif isinstance(value, (dict, com.ABCSeries)): @@ -2176,10 +2178,12 @@ def fillna(self, value=None, method=None, axis=0, inplace=False, if k not in result: continue obj = result[k] - obj.fillna(v, inplace=True) + obj.fillna(v, limit=limit, inplace=True) return result else: - new_data = self._data.fillna(value=value, inplace=inplace, + new_data = self._data.fillna(value=value, + limit=limit, + inplace=inplace, downcast=downcast) if inplace: diff --git a/pandas/core/internals.py b/pandas/core/internals.py index a5b9e874cea41..fc7b4bc23ac09 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -367,7 +367,7 @@ def apply(self, func, **kwargs): """ apply the function to my values; return a block if we are not one """ return self.as_block(func(self.values)) - def fillna(self, value, inplace=False, downcast=None): + def fillna(self, value, limit=None, inplace=False, downcast=None): if not self._can_hold_na: if inplace: return [self] @@ -375,6 +375,11 @@ def fillna(self, 
value, inplace=False, downcast=None): return [self.copy()] mask = com.isnull(self.values) + if limit is not None: + if self.ndim > 2: + raise NotImplementedError + mask[mask.cumsum(self.ndim-1)>limit]=False + value = self._try_fill(value) blocks = self.putmask(mask, value, inplace=inplace) return self._maybe_downcast(blocks, downcast) @@ -1680,11 +1685,18 @@ def _try_fill(self, value): value = tslib.iNaT return value - def fillna(self, value, inplace=False, downcast=None): + def fillna(self, value, limit=None, + inplace=False, downcast=None): + # straight putmask here values = self.values if inplace else self.values.copy() mask = com.isnull(self.values) value = self._try_fill(value) + if limit is not None: + if self.ndim > 2: + raise NotImplementedError + mask[mask.cumsum(self.ndim-1)>limit]=False + np.putmask(values, mask, value) return [self if inplace else make_block(values, self.items, self.ref_items, fastpath=True)] @@ -1889,8 +1901,10 @@ def interpolate(self, method='pad', axis=0, inplace=False, self.values.to_dense(), method, axis, limit, fill_value) return self.make_block(values, self.items, self.ref_items) - def fillna(self, value, inplace=False, downcast=None): + def fillna(self, value, limit=None, inplace=False, downcast=None): # we may need to upcast our fill to match our dtype + if limit is not None: + raise NotImplementedError if issubclass(self.dtype.type, np.floating): value = float(value) values = self.values if inplace else self.values.copy() diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 4758670517df0..ed88a355cf7a9 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -7113,6 +7113,17 @@ def test_fillna(self): df.fillna({ 2: 'foo' }, inplace=True) assert_frame_equal(df, expected) + # limit and value + df = DataFrame(np.random.randn(10,3)) + df.iloc[2:7,0] = np.nan + df.iloc[3:5,2] = np.nan + + expected = df.copy() + expected.iloc[2,0] = 999 + expected.iloc[3,2] = 999 + result = 
df.fillna(999,limit=1) + assert_frame_equal(result, expected) + def test_fillna_dtype_conversion(self): # make sure that fillna on an empty frame works df = DataFrame(index=["A","B","C"], columns = [1,2,3,4,5]) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 3f6e4c6f3288c..b2721689f574d 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1322,6 +1322,11 @@ def test_fillna(self): self.assertRaises(TypeError, self.panel.fillna, [1, 2]) self.assertRaises(TypeError, self.panel.fillna, (1, 2)) + # limit not implemented when only value is specified + p = Panel(np.random.randn(3,4,5)) + p.iloc[0:2,0:2,0:2] = np.nan + self.assertRaises(NotImplementedError, lambda : p.fillna(999,limit=1)) + def test_ffill_bfill(self): assert_panel_equal(self.panel.ffill(), self.panel.fillna(method='ffill')) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 93fa739f7f218..b90cdcf55f636 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2941,6 +2941,16 @@ def test_fillna(self): expected = Series([0,0,2.], list('bac')) assert_series_equal(result,expected) + # limit + s = Series(np.nan,index=[0,1,2]) + result = s.fillna(999,limit=1) + expected = Series([999,np.nan,np.nan],index=[0,1,2]) + assert_series_equal(result,expected) + + result = s.fillna(999,limit=2) + expected = Series([999,999,np.nan],index=[0,1,2]) + assert_series_equal(result,expected) + def test_fillna_bug(self): x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd']) filled = x.fillna(method='ffill')
from SO: http://stackoverflow.com/questions/22379343/change-first-occurrence-of-nan-in-a-specific-dataframe-row-to-a-new-value/22380225#22380225
https://api.github.com/repos/pandas-dev/pandas/pulls/6628
2014-03-13T13:32:40Z
2014-03-13T14:37:40Z
2014-03-13T14:37:40Z
2014-06-25T08:40:04Z
BUG: Error in to_stata when DataFrame contains non-string column names
diff --git a/doc/source/release.rst b/doc/source/release.rst index 4890f22e98468..12a83f48706e5 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -233,6 +233,7 @@ Bug Fixes - Bug in popping from a Series (:issue:`6600`) - Bug in ``iloc`` indexing when positional indexer matched Int64Index of corresponding axis no reordering happened (:issue:`6612`) - Bug in ``fillna`` with ``limit`` and ``value`` specified +- Bug in ``DataFrame.to_stata`` when columns have non-string names (:issue:`4558`) pandas 0.13.1 ------------- diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 7d9d272eea1b6..4bb61e385a75c 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -20,7 +20,7 @@ from pandas.core.categorical import Categorical import datetime from pandas import compat -from pandas.compat import long, lrange, lmap, lzip +from pandas.compat import long, lrange, lmap, lzip, text_type, string_types from pandas import isnull from pandas.io.common import get_filepath_or_buffer from pandas.tslib import NaT @@ -191,6 +191,21 @@ class PossiblePrecisionLoss(Warning): """ +class InvalidColumnName(Warning): + pass + + +invalid_name_doc = """ +Not all pandas column names were valid Stata variable names. 
+The following replacements have been made: + + {0} + +If this is not what you expect, please make sure you have Stata-compliant +column names in your DataFrame (strings only, max 32 characters, only alphanumerics and +underscores, no Stata reserved words) +""" + def _cast_to_stata_types(data): """Checks the dtypes of the columns of a pandas DataFrame for compatibility with the data types and ranges supported by Stata, and @@ -942,7 +957,7 @@ def _maybe_convert_to_int_keys(convert_dates, varlist): else: if not isinstance(key, int): raise ValueError( - "convery_dates key is not in varlist and is not an int" + "convert_dates key is not in varlist and is not an int" ) new_dict.update({key: convert_dates[key]}) return new_dict @@ -1092,6 +1107,78 @@ def _write(self, to_write): else: self._file.write(to_write) + + def _check_column_names(self, data): + """Checks column names to ensure that they are valid Stata column names. + This includes checks for: + * Non-string names + * Stata keywords + * Variables that start with numbers + * Variables with names that are too long + + When an illegal variable name is detected, it is converted, and if dates + are exported, the variable name is propogated to the date conversion + dictionary + """ + converted_names = [] + columns = list(data.columns) + original_columns = columns[:] + + duplicate_var_id = 0 + for j, name in enumerate(columns): + orig_name = name + if not isinstance(name, string_types): + name = text_type(name) + + for c in name: + if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \ + (c < '0' or c > '9') and c != '_': + name = name.replace(c, '_') + + # Variable name must not be a reserved word + if name in self.RESERVED_WORDS: + name = '_' + name + + # Variable name may not start with a number + if name[0] >= '0' and name[0] <= '9': + name = '_' + name + + name = name[:min(len(name), 32)] + + if not name == orig_name: + # check for duplicates + while columns.count(name) > 0: + # prepend ascending number to avoid 
duplicates + name = '_' + str(duplicate_var_id) + name + name = name[:min(len(name), 32)] + duplicate_var_id += 1 + + # need to possibly encode the orig name if its unicode + try: + orig_name = orig_name.encode('utf-8') + except: + pass + converted_names.append('{0} -> {1}'.format(orig_name, name)) + + columns[j] = name + + data.columns = columns + + # Check date conversion, and fix key if needed + if self._convert_dates: + for c, o in zip(columns, original_columns): + if c != o: + self._convert_dates[c] = self._convert_dates[o] + del self._convert_dates[o] + + if converted_names: + import warnings + + ws = invalid_name_doc.format('\n '.join(converted_names)) + warnings.warn(ws, InvalidColumnName) + + return data + def _prepare_pandas(self, data): #NOTE: we might need a different API / class for pandas objects so # we can set different semantics - handle this with a PR to pandas.io @@ -1108,6 +1195,8 @@ def __iter__(self): data = data.reset_index() # Check columns for compatibility with stata data = _cast_to_stata_types(data) + # Ensure column names are strings + data = self._check_column_names(data) self.datarows = DataFrameRowIter(data) self.nobs, self.nvar = data.shape self.data = data @@ -1181,58 +1270,13 @@ def _write_descriptors(self, typlist=None, varlist=None, srtlist=None, for typ in self.typlist: self._write(typ) - # varlist, length 33*nvar, char array, null terminated - converted_names = [] - duplicate_var_id = 0 - for j, name in enumerate(self.varlist): - orig_name = name - # Replaces all characters disallowed in .dta format by their integral representation. 
- for c in name: - if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and (c < '0' or c > '9') and c != '_': - name = name.replace(c, '_') - # Variable name must not be a reserved word - if name in self.RESERVED_WORDS: - name = '_' + name - # Variable name may not start with a number - if name[0] > '0' and name[0] < '9': - name = '_' + name - - name = name[:min(len(name), 32)] - - if not name == orig_name: - # check for duplicates - while self.varlist.count(name) > 0: - # prepend ascending number to avoid duplicates - name = '_' + str(duplicate_var_id) + name - name = name[:min(len(name), 32)] - duplicate_var_id += 1 - - # need to possibly encode the orig name if its unicode - try: - orig_name = orig_name.encode('utf-8') - except: - pass - - converted_names.append('{0} -> {1}'.format(orig_name, name)) - self.varlist[j] = name - + # varlist names are checked by _check_column_names + # varlist, requires null terminated for name in self.varlist: name = self._null_terminate(name, True) name = _pad_bytes(name[:32], 33) self._write(name) - if converted_names: - from warnings import warn - warn("""Not all pandas column names were valid Stata variable names. 
- Made the following replacements: - - {0} - - If this is not what you expect, please make sure you have Stata-compliant - column names in your DataFrame (max 32 characters, only alphanumerics and - underscores)/ - """.format('\n '.join(converted_names))) - # srtlist, 2*(nvar+1), int array, encoded by byteorder srtlist = _pad_bytes("", (2*(nvar+1))) self._write(srtlist) diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index a99420493d047..fe79bf20615bb 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -13,7 +13,7 @@ import pandas as pd from pandas.core.frame import DataFrame, Series from pandas.io.parsers import read_csv -from pandas.io.stata import read_stata, StataReader +from pandas.io.stata import read_stata, StataReader, InvalidColumnName import pandas.util.testing as tm from pandas.util.misc import is_little_endian from pandas import compat @@ -332,10 +332,10 @@ def test_read_write_dta12(self): tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted) def test_read_write_dta13(self): - s1 = Series(2**9,dtype=np.int16) - s2 = Series(2**17,dtype=np.int32) - s3 = Series(2**33,dtype=np.int64) - original = DataFrame({'int16':s1,'int32':s2,'int64':s3}) + s1 = Series(2**9, dtype=np.int16) + s2 = Series(2**17, dtype=np.int32) + s3 = Series(2**33, dtype=np.int64) + original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3}) original.index.name = 'index' formatted = original @@ -398,6 +398,22 @@ def test_timestamp_and_label(self): assert parsed_time_stamp == time_stamp assert reader.data_label == data_label + def test_numeric_column_names(self): + original = DataFrame(np.reshape(np.arange(25.0), (5, 5))) + original.index.name = 'index' + with tm.ensure_clean() as path: + # should get a warning for that format. 
+ with warnings.catch_warnings(record=True) as w: + tm.assert_produces_warning(original.to_stata(path), InvalidColumnName) + # should produce a single warning + np.testing.assert_equal(len(w), 1) + + written_and_read_again = self.read_dta(path) + written_and_read_again = written_and_read_again.set_index('index') + columns = list(written_and_read_again.columns) + convert_col_name = lambda x: int(x[1]) + written_and_read_again.columns = map(convert_col_name, columns) + tm.assert_frame_equal(original, written_and_read_again) if __name__ == '__main__':
closes #4558 to_stata does not work correctly when used with non-string names. Since Stata requires string names, the proposed fix attempts to rename columns using the string representation of the column name used. A warning is raised if the column name is changed.
https://api.github.com/repos/pandas-dev/pandas/pulls/6622
2014-03-13T00:37:24Z
2014-03-13T22:19:16Z
2014-03-13T22:19:16Z
2014-07-16T08:58:09Z
CLN #6589 remove print statements when sep=None is passed to read_csv
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index e7d9145aa9d68..b45b8929e7af3 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -535,7 +535,6 @@ def _clean_options(self, options, engine): if sep is None and not delim_whitespace: if engine == 'c': - print('Using Python parser to sniff delimiter') engine = 'python' elif sep is not None and len(sep) > 1: # wait until regex engine integrated
Simply removed unneceesary print. fix https://github.com/pydata/pandas/issues/6589
https://api.github.com/repos/pandas-dev/pandas/pulls/6615
2014-03-12T14:04:31Z
2014-03-12T14:22:51Z
2014-03-12T14:22:51Z
2014-07-11T13:43:40Z
BUG/CLN: fix iloc when positional indexer matched Int64Index key
diff --git a/doc/source/release.rst b/doc/source/release.rst index 226fce52dbde1..6f83bea6a6578 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -231,6 +231,8 @@ Bug Fixes - Bug in fillna with method = 'bfill/ffill' and ``datetime64[ns]`` dtype (:issue:`6587`) - Bug in sql writing with mixed dtypes possibly leading to data loss (:issue:`6509`) - Bug in popping from a Series (:issue:`6600`) +- Bug in ``iloc`` indexing when positional indexer matched Int64Index of corresponding axis no reordering happened (:issue:`6612`) + pandas 0.13.1 ------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6a8786229e479..e6e275a5f48b0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1613,10 +1613,8 @@ def _ixs(self, i, axis=0, copy=False): else: label = self.index[i] if isinstance(label, Index): - # a location index by definition - i = _maybe_convert_indices(i, len(self._get_axis(axis))) - result = self.reindex(i, takeable=True) + result = self.take(i, axis=axis) copy=True else: new_values, copy = self._data.fast_xs(i, copy=copy) @@ -2124,41 +2122,38 @@ def lookup(self, row_labels, col_labels): #---------------------------------------------------------------------- # Reindexing and alignment - def _reindex_axes(self, axes, level, limit, method, fill_value, copy, - takeable=False): + def _reindex_axes(self, axes, level, limit, method, fill_value, copy): frame = self columns = axes['columns'] if columns is not None: frame = frame._reindex_columns(columns, copy, level, fill_value, - limit, takeable=takeable) + limit) index = axes['index'] if index is not None: frame = frame._reindex_index(index, method, copy, level, - fill_value, limit, takeable=takeable) + fill_value, limit) return frame def _reindex_index(self, new_index, method, copy, level, fill_value=NA, - limit=None, takeable=False): + limit=None): new_index, indexer = self.index.reindex(new_index, method, level, limit=limit, - copy_if_needed=True, - takeable=takeable) + 
copy_if_needed=True) return self._reindex_with_indexers({0: [new_index, indexer]}, copy=copy, fill_value=fill_value, - allow_dups=takeable) + allow_dups=False) def _reindex_columns(self, new_columns, copy, level, fill_value=NA, - limit=None, takeable=False): + limit=None): new_columns, indexer = self.columns.reindex(new_columns, level=level, limit=limit, - copy_if_needed=True, - takeable=takeable) + copy_if_needed=True) return self._reindex_with_indexers({1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, - allow_dups=takeable) + allow_dups=False) def _reindex_multi(self, axes, copy, fill_value): """ we are guaranteed non-Nones in the axes! """ @@ -2689,10 +2684,9 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False): ax = 'index' if axis == 0 else 'columns' if new_axis.is_unique: - d = {ax: new_axis} + return self.reindex(**{ax: new_axis}) else: - d = {ax: indexer, 'takeable': True} - return self.reindex(**d) + return self.take(indexer, axis=axis, convert=False) if inplace: if axis == 1: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 120e03e9962d8..678578a19e221 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1546,8 +1546,6 @@ def sort_index(self, axis=0, ascending=True): "compatible" value limit : int, default None Maximum size gap to forward or backward fill - takeable : boolean, default False - treat the passed as positional values Examples -------- @@ -1570,7 +1568,6 @@ def reindex(self, *args, **kwargs): copy = kwargs.get('copy', True) limit = kwargs.get('limit') fill_value = kwargs.get('fill_value', np.nan) - takeable = kwargs.get('takeable', False) self._consolidate_inplace() @@ -1591,11 +1588,9 @@ def reindex(self, *args, **kwargs): # perform the reindex on the axes return self._reindex_axes(axes, level, limit, - method, fill_value, copy, - takeable=takeable).__finalize__(self) + method, fill_value, copy).__finalize__(self) - def _reindex_axes(self, axes, level, limit, method, fill_value, 
copy, - takeable=False): + def _reindex_axes(self, axes, level, limit, method, fill_value, copy): """ perform the reinxed for all the axes """ obj = self for a in self._AXIS_ORDERS: @@ -1610,13 +1605,12 @@ def _reindex_axes(self, axes, level, limit, method, fill_value, copy, axis = self._get_axis_number(a) ax = self._get_axis(a) new_index, indexer = ax.reindex( - labels, level=level, limit=limit, method=method, - takeable=takeable) + labels, level=level, limit=limit, method=method) obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, method=method, fill_value=fill_value, limit=limit, copy=copy, - allow_dups=takeable) + allow_dups=False) return obj diff --git a/pandas/core/index.py b/pandas/core/index.py index 3bc3783fffcbd..0343475be377e 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1308,7 +1308,7 @@ def _get_method(self, method): return aliases.get(method, method) def reindex(self, target, method=None, level=None, limit=None, - copy_if_needed=False, takeable=False): + copy_if_needed=False): """ For Index, simply returns the new index and the results of get_indexer. Provided here to enable an interface that is amenable for @@ -1336,13 +1336,6 @@ def reindex(self, target, method=None, level=None, limit=None, target = self.copy() else: - - if takeable: - if method is not None or limit is not None: - raise ValueError("cannot do a takeable reindex with " - "with a method or limit") - return self[target], target - if self.is_unique: indexer = self.get_indexer(target, method=method, limit=limit) @@ -3113,7 +3106,7 @@ def get_indexer(self, target, method=None, limit=None): return com._ensure_platform_int(indexer) def reindex(self, target, method=None, level=None, limit=None, - copy_if_needed=False, takeable=False): + copy_if_needed=False): """ Performs any necessary conversion on the input index and calls get_indexer. 
This method is here so MultiIndex and an Index of @@ -3124,10 +3117,6 @@ def reindex(self, target, method=None, level=None, limit=None, (new_index, indexer, mask) : (MultiIndex, ndarray, ndarray) """ - # a direct takeable - if takeable: - return self.take(target), target - if level is not None: if method is not None: raise TypeError('Fill method not supported if level passed') @@ -3142,14 +3131,8 @@ def reindex(self, target, method=None, level=None, limit=None, indexer = self.get_indexer(target, method=method, limit=limit) else: - if takeable: - if method is not None or limit is not None: - raise ValueError("cannot do a takeable reindex " - "with a method or limit") - return self[target], target - raise Exception( - "cannot handle a non-takeable non-unique multi-index!") + "cannot handle a non-unique multi-index!") if not isinstance(target, MultiIndex): if indexer is None: diff --git a/pandas/core/series.py b/pandas/core/series.py index 409bbf60193af..bc5566ce4baa1 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -460,8 +460,7 @@ def _ixs(self, i, axis=0): else: label = self.index[i] if isinstance(label, Index): - i = _maybe_convert_indices(i, len(self)) - return self.reindex(i, takeable=True) + return self.take(i, axis=axis, convert=True) else: return _index.get_value_at(self, i)
closes #6612 As discussed in #6612, `reindex(..., takeable=True)` as released in 0.13.0 didn't do anything that's not achieved by `.take`, `.iloc` and `.copy` attrs/methods, but also contained a bug which propagated to `iloc` when the latter was changed to use _reindex/takeable_ internally. The proposed solution is to remove _reindex/takeable_ functionality altogether. According to @jreback, it was never supposed to be a part of public API, so probably no deprecation procedure is necessary.
https://api.github.com/repos/pandas-dev/pandas/pulls/6614
2014-03-12T11:09:22Z
2014-03-12T18:33:43Z
2014-03-12T18:33:43Z
2014-07-16T08:58:04Z
TST: enable MySQL tests
diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt index 8fc289b5e1511..030dae3c50fe8 100644 --- a/ci/requirements-2.7.txt +++ b/ci/requirements-2.7.txt @@ -18,4 +18,5 @@ beautifulsoup4==4.2.1 statsmodels==0.5.0 bigquery==2.0.17 sqlalchemy==0.8.1 +pymysql==0.6.1 psycopg2==2.5.2 diff --git a/ci/requirements-3.3.txt b/ci/requirements-3.3.txt index 8e85c1108b5bf..d37bc35902976 100644 --- a/ci/requirements-3.3.txt +++ b/ci/requirements-3.3.txt @@ -16,4 +16,5 @@ scipy==0.12.0 beautifulsoup4==4.2.1 statsmodels==0.4.3 sqlalchemy==0.9.1 +pymysql==0.6.1 psycopg2==2.5.2 diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 4c0c18a0e7bd0..4c0c22da63848 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -668,7 +668,7 @@ def read_table(self, table_name, index_col=None, coerce_float=True, parse_dates=None, columns=None): table = PandasSQLTable(table_name, self, index=index_col) - return table.read(coerce_float=parse_dates, + return table.read(coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) def drop_table(self, table_name): diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 0e26a66921df4..46b5758904315 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -495,7 +495,7 @@ def test_read_table_absent(self): self.assertRaises( ValueError, sql.read_table, "this_doesnt_exist", con=self.conn) - def test_default_type_convertion(self): + def test_default_type_conversion(self): df = sql.read_table("types_test_data", self.conn) self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating), @@ -589,7 +589,7 @@ def setUp(self): self._load_test1_data() - def test_default_type_convertion(self): + def test_default_type_conversion(self): df = sql.read_table("types_test_data", self.conn) self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating), @@ -755,6 +755,24 @@ def tearDown(self): for table in c.fetchall(): self.conn.execute('DROP TABLE %s' % table[0]) + def test_default_type_conversion(self): + 
df = sql.read_table("types_test_data", self.conn) + + self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating), + "FloatCol loaded with incorrect type") + self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer), + "IntCol loaded with incorrect type") + # MySQL has no real BOOL type (it's an alias for TINYINT) + self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer), + "BoolCol loaded with incorrect type") + + # Int column with NA values stays as float + self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating), + "IntColWithNull loaded with incorrect type") + # Bool column with NA = int column with NA values => becomes float + self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating), + "BoolColWithNull loaded with incorrect type") + class TestPostgreSQLAlchemy(_TestSQLAlchemy): flavor = 'postgresql'
PR to fix and enable the MySQL tests for the new sql functionality. - floats (decimal.Decimal) were not converted due to a small bug in the `coerce_float` arg. This is now OK. - MySQL has no real BOOL type (it is just an alias for tiny int). So I changed the tests for that (so a bool column is converted to int64 (or float if there are NA's) We should maybe add somewhere in the sql docs an overview of the limitations of the type conversion.
https://api.github.com/repos/pandas-dev/pandas/pulls/6613
2014-03-12T09:23:14Z
2014-03-12T13:52:25Z
2014-03-12T13:52:25Z
2014-07-07T14:23:04Z
FIX str.match uses na flag
diff --git a/doc/source/release.rst b/doc/source/release.rst index 0e6924e4b0122..cf50dd52e70e4 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -186,6 +186,7 @@ Bug Fixes - Disabled clipboard tests until release time (run locally with ``nosetests -A disabled`` (:issue:`6048`). - Bug in ``DataFrame.replace()`` when passing a nested ``dict`` that contained keys not in the values to be replaced (:issue:`6342`) +- ``str.match`` ignored the na flag (:issue:`6609`). - Bug in take with duplicate columns not consolidated (:issue:`6240`) - Bug in interpolate changing dtypes (:issue:`6290`) - Bug in Series.get, was using a buggy access method (:issue:`6383`) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 3e3d1e2dbd76e..6add1767a05d6 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -364,11 +364,11 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False): # Do this first, to make sure it happens even if the re.compile # raises below. warnings.warn("In future versions of pandas, match will change to" - " always return a bool indexer.""", UserWarning) + " always return a bool indexer.", UserWarning) if as_indexer and regex.groups > 0: warnings.warn("This pattern has match groups. To actually get the" - " groups, use str.extract.""", UserWarning) + " groups, use str.extract.", UserWarning) # If not as_indexer and regex.groups == 0, this returns empty lists # and is basically useless, so we will not warn. @@ -384,7 +384,7 @@ def f(x): # This is the new behavior of str_match. 
f = lambda x: bool(regex.match(x)) - return _na_map(f, arr) + return _na_map(f, arr, na) def str_extract(arr, pat, flags=0): @@ -887,6 +887,12 @@ def contains(self, pat, case=True, flags=0, na=np.nan, regex=True): na=na, regex=regex) return self._wrap_result(result) + @copy(str_match) + def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=False): + result = str_match(self.series, pat, case=case, flags=flags, + na=na, as_indexer=as_indexer) + return self._wrap_result(result) + @copy(str_replace) def replace(self, pat, repl, n=-1, case=True, flags=0): result = str_replace(self.series, pat, repl, n=n, case=case, @@ -951,7 +957,6 @@ def get_dummies(self, sep='|'): startswith = _pat_wrapper(str_startswith, na=True) endswith = _pat_wrapper(str_endswith, na=True) findall = _pat_wrapper(str_findall, flags=True) - match = _pat_wrapper(str_match, flags=True) extract = _pat_wrapper(str_extract, flags=True) len = _noarg_wrapper(str_len) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 53cf3d9b5ecc5..2721edcc89e59 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -220,7 +220,7 @@ def test_contains(self): # na values = Series(['om', 'foo',np.nan]) res = values.str.contains('foo', na="foo") - self.assertEqual (res.ix[2], "foo" ) + self.assertEqual (res.ix[2], "foo") def test_startswith(self): values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo']) @@ -460,6 +460,14 @@ def test_match(self): exp = Series([True, NA, False]) tm.assert_series_equal(result, exp) + # na GH #6609 + res = Series(['a', 0, np.nan]).str.match('a', na=False) + exp = Series([True, False, False]) + assert_series_equal(exp, res) + res = Series(['a', 0, np.nan]).str.match('a') + exp = Series([True, np.nan, np.nan]) + assert_series_equal(exp, res) + def test_extract(self): # Contains tests like those in test_match and some others.
fixes #6609
https://api.github.com/repos/pandas-dev/pandas/pulls/6611
2014-03-12T06:11:39Z
2014-03-14T06:18:09Z
2014-03-14T06:18:09Z
2014-06-20T20:48:38Z
Change Panel.shift to use NDFrame.shift
diff --git a/doc/source/release.rst b/doc/source/release.rst index cd5b0cbd23353..4b87a78982662 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -152,6 +152,8 @@ API Changes - all offset operations now return ``Timestamp`` types (rather than datetime), Business/Week frequencies were incorrect (:issue:`4069`) +- ``Panel.shift`` now uses ``NDFrame.shift``. It no longer drops the ``nan`` data and retains its original shape. (:issue:`4867`) + Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 057f83bff44f2..29d503dcd7718 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -196,6 +196,8 @@ API changes covs = rolling_cov(df[['A','B','C']], df[['B','C','D']], 5, pairwise=True) covs[df.index[-1]] +- ``Panel.shift`` now uses ``NDFrame.shift``. It no longer drops the ``nan`` data and retains its original shape. (:issue:`4867`) + MultiIndexing Using Slicers ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 2bf50bb1bf142..eeb0e292c01d4 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1116,8 +1116,7 @@ def count(self, axis='major'): def shift(self, lags, freq=None, axis='major'): """ - Shift major or minor axis by specified number of leads/lags. Drops - periods right now compared with DataFrame.shift + Shift major or minor axis by specified number of leads/lags. 
Parameters ---------- @@ -1128,35 +1127,13 @@ def shift(self, lags, freq=None, axis='major'): ------- shifted : Panel """ - values = self.values - items = self.items - major_axis = self.major_axis - minor_axis = self.minor_axis - if freq: return self.tshift(lags, freq, axis=axis) - if lags > 0: - vslicer = slice(None, -lags) - islicer = slice(lags, None) - elif lags == 0: - vslicer = islicer = slice(None) - else: - vslicer = slice(-lags, None) - islicer = slice(None, lags) - - axis = self._get_axis_name(axis) - if axis == 'major_axis': - values = values[:, vslicer, :] - major_axis = major_axis[islicer] - elif axis == 'minor_axis': - values = values[:, :, vslicer] - minor_axis = minor_axis[islicer] - else: + if axis == 'items': raise ValueError('Invalid axis') - return self._constructor(values, items=items, major_axis=major_axis, - minor_axis=minor_axis) + return super(Panel, self).shift(lags, freq=freq, axis=axis) def tshift(self, periods=1, freq=None, axis='major', **kwds): return super(Panel, self).tshift(periods, freq, axis, **kwds) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index b2721689f574d..198e600e8edc7 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1642,7 +1642,7 @@ def test_shift(self): # negative numbers, #2164 result = self.panel.shift(-1) - expected = Panel(dict((i, f.shift(-1)[:-1]) + expected = Panel(dict((i, f.shift(-1)) for i, f in compat.iteritems(self.panel))) assert_panel_equal(result, expected) diff --git a/vb_suite/panel_methods.py b/vb_suite/panel_methods.py new file mode 100644 index 0000000000000..6710be760e2df --- /dev/null +++ b/vb_suite/panel_methods.py @@ -0,0 +1,19 @@ +from vbench.api import Benchmark +from datetime import datetime + +common_setup = """from pandas_vb_common import * +""" + +#---------------------------------------------------------------------- +# shift + +setup = common_setup + """ +index = date_range(start="2000", freq="D", periods=1000) +panel = 
Panel(np.random.randn(100, len(index), 1000)) +""" + +panel_shift = Benchmark('panel.shift(1)', setup, + start_date=datetime(2012, 1, 12)) + +panel_shift_minor = Benchmark('panel.shift(1, axis=minor)', setup, + start_date=datetime(2012, 1, 12)) diff --git a/vb_suite/suite.py b/vb_suite/suite.py index 03f85da698ff8..a1b38e8509e4e 100644 --- a/vb_suite/suite.py +++ b/vb_suite/suite.py @@ -18,6 +18,7 @@ 'panel_ctor', 'packers', 'parser_vb', + 'panel_methods', 'plotting', 'reindex', 'replace',
closes #4867 This brings `Panel.shift` in line with `DataFrame.shift`. The shifted data retains the same dimensions/indexes and doesn't drop the empty rows. So I have two concerns. 1. This is an API change and could conceivably break existing code. 2. The perf regresses quite a bit. Here is the vbench. ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- panel_shift | 578.2867 | 0.0773 | 7478.4491 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- ``` The previous implementation did not create a copy and thus had a constant time. The general shift scales linearly. I think the current implementation just defers work done in later alignments, but I can only speak to my own workflow.
https://api.github.com/repos/pandas-dev/pandas/pulls/6605
2014-03-11T20:10:41Z
2014-04-01T20:55:05Z
2014-04-01T20:55:04Z
2014-06-29T18:16:04Z
ENH: Make sure makeMissing* works as expected.
diff --git a/pandas/util/testing.py b/pandas/util/testing.py index a0876179ee4af..2860cdf3b200d 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -941,8 +941,8 @@ def _gen_unique_rand(rng, _extra_size): extra_size *= 1.05 ind = _gen_unique_rand(random_state, extra_size) - j = np.floor(ind * 1. / nrows) - i = (ind - j * nrows) + j = np.floor(ind * 1. / nrows).astype(int) + i = (ind - j * nrows).astype(int) return i.tolist(), j.tolist() @@ -973,7 +973,7 @@ def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None, r_idx_type=r_idx_type) i, j = _create_missing_idx(nrows, ncols, density, random_state) - df.iloc[i, j] = np.nan + df.values[i, j] = np.nan return df @@ -981,7 +981,7 @@ def makeMissingDataframe(density=.9, random_state=None): df = makeDataFrame() i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state) - df.iloc[i, j] = np.nan + df.values[i, j] = np.nan return df
https://api.github.com/repos/pandas-dev/pandas/pulls/6603
2014-03-11T18:01:45Z
2014-03-11T18:50:06Z
2014-03-11T18:50:06Z
2014-07-16T08:57:55Z
BUG: Bug in popping from a Series (GH6600)
diff --git a/doc/source/release.rst b/doc/source/release.rst index c42c9920efef1..434477d071c4b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -228,7 +228,7 @@ Bug Fixes - Bug in ``.xs`` with a ``nan`` in level when dropped (:issue:`6574`) - Bug in fillna with method = 'bfill/ffill' and ``datetime64[ns]`` dtype (:issue:`6587`) - Bug in sql writing with mixed dtypes possibly leading to data loss (:issue:`6509`) - +- Bug in popping from a Series (:issue:`6600`) pandas 0.13.1 ------------- diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 2a4ee4dcf8cf6..a5b9e874cea41 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -3733,11 +3733,21 @@ def reindex_axis0_with_method(self, new_axis, indexer=None, method=None, def _delete_from_block(self, i, item): super(SingleBlockManager, self)._delete_from_block(i, item) - # reset our state - self._block = ( - self.blocks[0] if len(self.blocks) else - make_block(np.array([], dtype=self._block.dtype), [], []) - ) + # possibly need to merge split blocks + if len(self.blocks) > 1: + new_items = Index(list(itertools.chain(*[ b.items for b in self.blocks ]))) + block = make_block(np.concatenate([ b.values for b in self.blocks ]), + new_items, + new_items, + dtype=self._block.dtype) + + elif len(self.blocks): + block = self.blocks[0] + else: + block = make_block(np.array([], dtype=self._block.dtype), [], []) + + self.blocks = [block] + self._block = block self._values = self._block.values def get_slice(self, slobj): diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index ca1b23ee26da4..93fa739f7f218 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -710,6 +710,21 @@ def test_setindex(self): def test_array_finalize(self): pass + def test_pop(self): + # GH 6600 + df = DataFrame({ + 'A': 0, + 'B': np.arange(5,dtype='int64'), + 'C': 0, + }) + k = df.iloc[4] + + result = k.pop('B') + self.assertEqual(result, 4) + + expected = 
Series([0,0],index=['A','C']) + assert_series_equal(k, expected) + def test_not_hashable(self): s_empty = Series() s = Series([1])
closes #6600
https://api.github.com/repos/pandas-dev/pandas/pulls/6601
2014-03-11T17:43:46Z
2014-03-11T19:00:54Z
2014-03-11T19:00:54Z
2014-06-30T13:50:31Z
SQL: move df.to_sql to generic + update with new functionality
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8a72b0d7c4493..4f23fd69d7621 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1261,25 +1261,6 @@ def to_stata( time_stamp=time_stamp, data_label=data_label) writer.write_file() - def to_sql(self, name, con, flavor='sqlite', if_exists='fail', **kwargs): - """ - Write records stored in a DataFrame to a SQL database. - - Parameters - ---------- - name : str - Name of SQL table - conn : an open SQL database connection object - flavor: {'sqlite', 'mysql', 'oracle'}, default 'sqlite' - if_exists: {'fail', 'replace', 'append'}, default 'fail' - - fail: If table exists, do nothing. - - replace: If table exists, drop it, recreate it, and insert data. - - append: If table exists, insert data. Create if does not exist. - """ - from pandas.io.sql import write_frame - write_frame( - self, name, con, flavor=flavor, if_exists=if_exists, **kwargs) - @Appender(fmt.docstring_to_string, indents=1) def to_string(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0a8f57c581d92..4b28e6a09184a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -908,6 +908,33 @@ def to_msgpack(self, path_or_buf=None, **kwargs): from pandas.io import packers return packers.to_msgpack(path_or_buf, self, **kwargs) + def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True): + """ + Write records stored in a DataFrame to a SQL database. + + Parameters + ---------- + name : string + Name of SQL table + con : SQLAlchemy engine or DBAPI2 connection (legacy mode) + Using SQLAlchemy makes it possible to use any DB supported by that + library. + If a DBAPI2 object is given, a supported SQL flavor must also be provided + flavor : {'sqlite', 'mysql'}, default 'sqlite' + The flavor of SQL to use. Ignored when using SQLAlchemy engine. + Required when using DBAPI2 connection. 
+ if_exists : {'fail', 'replace', 'append'}, default 'fail' + - fail: If table exists, do nothing. + - replace: If table exists, drop it, recreate it, and insert data. + - append: If table exists, insert data. Create if does not exist. + index : boolean, default True + Write DataFrame index as a column + + """ + from pandas.io import sql + sql.to_sql( + self, name, con, flavor=flavor, if_exists=if_exists, index=index) + def to_pickle(self, path): """ Pickle (serialize) object to input file path diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 4c0c22da63848..f17820b06ce5e 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -11,7 +11,7 @@ import pandas.core.common as com from pandas.compat import lzip, map, zip, raise_with_traceback, string_types -from pandas.core.api import DataFrame +from pandas.core.api import DataFrame, Series from pandas.core.base import PandasObject from pandas.tseries.tools import to_datetime @@ -253,6 +253,12 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True): Write DataFrame index as a column """ pandas_sql = pandasSQL_builder(con, flavor=flavor) + + if isinstance(frame, Series): + frame = frame.to_frame() + elif not isinstance(frame, DataFrame): + raise NotImplementedError + pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index) diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 9ecb605def400..8e045db0315cb 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -337,6 +337,18 @@ def test_to_sql_append(self): self.assertEqual( num_rows, num_entries, "not the same number of rows as entries") + def test_to_sql_series(self): + s = Series(np.arange(5, dtype='int64'), name='series') + sql.to_sql(s, "test_series", self.conn, flavor='sqlite', index=False) + s2 = sql.read_sql("SELECT * FROM test_series", self.conn, + flavor='sqlite') + tm.assert_frame_equal(s.to_frame(), s2) + + def test_to_sql_panel(self): + panel = tm.makePanel() + 
self.assertRaises(NotImplementedError, sql.to_sql, panel, + 'test_panel', self.conn, flavor='sqlite') + def test_legacy_write_frame(self): """Test legacy write frame name. Assume that functionality is already tested above so just do quick check that it basically works"""
Initial commit for this. Questions: - To catch Panels (and raiseNotImplementedError), should I do this in sql.to_sql or in NDFrame.to_sql? - To support Series, I can just call `to_frame` on it. Is this OK? And should add tests for this.
https://api.github.com/repos/pandas-dev/pandas/pulls/6598
2014-03-11T12:50:00Z
2014-03-16T14:37:42Z
2014-03-16T14:37:42Z
2014-07-02T11:06:35Z
FIX: Bug whereby array_equivalent was not correctly comparing Float64Ind...
diff --git a/pandas/core/common.py b/pandas/core/common.py index 60a533db01f7f..46ca371284ae4 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -311,11 +311,14 @@ def array_equivalent(left, right): >>> array_equivalent(np.array([1, nan, 2]), np.array([1, 2, nan])) False """ + left, right = np.asarray(left), np.asarray(right) if left.shape != right.shape: return False # NaNs occur only in object arrays, float or complex arrays. + if issubclass(left.dtype.type, np.object_): + return ((left == right) | (pd.isnull(left) & pd.isnull(right))).all() if not issubclass(left.dtype.type, (np.floating, np.complexfloating)): return np.array_equal(left, right) - return ((left == right) | (np.isnan(left) & np.isnan(right))).all() + return ((left == right) | (np.isnan(left) & np.isnan(right))).all() def _iterable_not_string(x): return (isinstance(x, collections.Iterable) and diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 3b3b2becc82db..59bfce8d9d636 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -5,7 +5,7 @@ from nose.tools import assert_equal import numpy as np from pandas.tslib import iNaT, NaT -from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp +from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp, Float64Index from pandas import compat from pandas.compat import range, long, lrange, lmap, u from pandas.core.common import notnull, isnull, array_equivalent @@ -181,7 +181,11 @@ def test_array_equivalent(): assert not array_equivalent(np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan])) assert not array_equivalent(np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e'])) - + assert array_equivalent(Float64Index([0, np.nan]), Float64Index([0, np.nan])) + assert not array_equivalent(Float64Index([0, np.nan]), Float64Index([1, np.nan])) + assert array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan])) + assert not 
array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan])) + def test_datetimeindex_from_empty_datetime64_array(): for unit in [ 'ms', 'us', 'ns' ]: idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
Currently, ``` >>> import pandas.core.common as com >>> com.array_equivalent(Float64Index([0, np.nan]), Float64Index([0, np.nan])) False ``` Although the current pandas code base does not use `array_equivalent` to compare Float64Indexes, leaving `array_equivalent` in its current state may be a bug waiting to happen. This PR attempts to fix the problem by using `pd.isnull` for all arrays of dtype `object`. In a previous PR I tried this and got terrible perf results. Since then I've discovered that my machine does not have enough memory to run the full perf test suit without page faults. If I rerun `test_perf.sh` for just a few Benchmarks, I can avoid the page faults and get consistent results. Running `/usr/bin/time -v ./test_perf.sh -b master -t fix-equivalent` yielded two tests with ratio > 1.1. ``` ------------------------------------------------------------------------------- reindex_fillna_pad | 0.5784 | 0.5034 | 1.1490 | packers_write_pack | 15.2360 | 7.1851 | 2.1205 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- ``` which I believe were due to page faults. When I reran perf on just these tests using `/usr/bin/time -v ./test_perf.sh -b master -t fix-equivalent -r "reindex_fillna_pad|packers_write_pack"` I got ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- reindex_fillna_pad_float32 | 0.4633 | 0.4590 | 1.0093 | packers_write_pack | 7.9544 | 7.8390 | 1.0147 | reindex_fillna_pad | 0.7290 | 0.7180 | 1.0154 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6597
2014-03-11T12:26:58Z
2014-03-11T12:56:40Z
2014-03-11T12:56:40Z
2014-07-16T08:57:49Z
Gbq unicode support
diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index cfee48d62928b..932ed4e1672b7 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -327,6 +327,7 @@ Enhancements - ``DataFrame.to_stata`` and ``StataWriter`` will accept keyword arguments time_stamp and data_label which allow the time stamp and dataset label to be set when creating a file. (:issue:`6545`) +- ``pandas.io.gbq`` now handles reading unicode strings properly. (:issue:`5940`) Performance ~~~~~~~~~~~ diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index ebf4f17ffb852..60381a2a628c2 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -9,6 +9,7 @@ from datetime import datetime import pkg_resources from distutils.version import LooseVersion +from pandas.compat import u import pandas as pd import numpy as np @@ -117,9 +118,8 @@ def _parse_entry(field_value, field_type): field_value = np.datetime64(timestamp) elif field_type == 'BOOLEAN': field_value = field_value == 'true' - # Note that results are unicode, so this will - # fail for non-ASCII characters.. 
this probably - # functions differently in Python 3 + elif field_type == 'STRING': + field_value = field_value else: field_value = str(field_value) return field_value diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py index ec051d008b3f3..124658ac80234 100644 --- a/pandas/io/tests/test_gbq.py +++ b/pandas/io/tests/test_gbq.py @@ -11,6 +11,7 @@ from pandas.core.frame import DataFrame from pandas.util.testing import with_connectivity_check +from pandas.compat import u from pandas import NaT @@ -193,9 +194,28 @@ def test_type_conversion(self): np.bool(False), np.int('2'), np.float('3.14159'), - 'Hello World'] + u('Hello World')] self.assertEqual(actual_output, sample_output, 'A format conversion failed') + @with_connectivity_check + def test_unicode_string_conversion(self): + # Strings from BigQuery Should be converted to UTF-8 properly + + if not os.path.exists(self.bq_token): + raise nose.SkipTest('Skipped because authentication information is not available.') + + correct_test_datatype = DataFrame( + {'UNICODE_STRING' : [u("\xe9\xfc")]} + ) + + query = """SELECT '\xc3\xa9\xc3\xbc' as UNICODE_STRING""" + + client = gbq._authenticate() + a = gbq.read_gbq(query) + tm.assert_frame_equal(a, correct_test_datatype) + + + def test_data_small(self): # Parsing a fixed page of data should return the proper fixed np.array() result_frame = gbq._parse_page(self.test_data_small,
Resolves Issue https://github.com/pydata/pandas/issues/5940 .
https://api.github.com/repos/pandas-dev/pandas/pulls/6596
2014-03-11T04:25:01Z
2014-03-14T02:32:54Z
2014-03-14T02:32:54Z
2014-06-14T18:12:35Z
FIX filter selects selected columns
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 1bdb3973ee92c..86590d2319447 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2529,7 +2529,7 @@ def filter(self, func, dropna=True, *args, **kwargs): indices = [] - obj = self._obj_with_exclusions + obj = self._selected_obj gen = self.grouper.get_iterator(obj, axis=self.axis) fast_path, slow_path = self._define_paths(func, *args, **kwargs) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 3b613bb1705a3..adca8389b8939 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -3438,6 +3438,13 @@ def test_filter_and_transform_with_non_unique_string_index(self): actual = grouped_df.pid.transform(len) assert_series_equal(actual, expected) + def test_filter_has_access_to_grouped_cols(self): + df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B']) + g = df.groupby('A') + # previously didn't have access to col A #???? + filt = g.filter(lambda x: x['A'].sum() == 2) + assert_frame_equal(filt, df.iloc[[0, 1]]) + def test_index_label_overlaps_location(self): # checking we don't have any label/location confusion in the # the wake of GH5375 @@ -3486,7 +3493,8 @@ def test_groupby_selection_with_methods(self): 'idxmin', 'idxmax', 'ffill', 'bfill', 'pct_change', - 'tshift' + 'tshift', + #'ohlc' ] for m in methods: @@ -3501,8 +3509,11 @@ def test_groupby_selection_with_methods(self): g_exp.apply(lambda x: x.sum())) assert_frame_equal(g.resample('D'), g_exp.resample('D')) + assert_frame_equal(g.resample('D', how='ohlc'), + g_exp.resample('D', how='ohlc')) - + assert_frame_equal(g.filter(lambda x: len(x) == 3), + g_exp.filter(lambda x: len(x) == 3)) def test_groupby_whitelist(self): from string import ascii_lowercase
fixes #6512 TST for selected groupby add resample ohlc and filter add couple of tests for #5264
https://api.github.com/repos/pandas-dev/pandas/pulls/6593
2014-03-11T00:40:38Z
2014-03-11T00:54:21Z
2014-03-11T00:54:21Z
2014-07-02T01:18:18Z
Improve performance for custom business days (GH6584)
diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index ea321cbab545a..490bb155ba814 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -343,6 +343,7 @@ Enhancements and data_label which allow the time stamp and dataset label to be set when creating a file. (:issue:`6545`) - ``pandas.io.gbq`` now handles reading unicode strings properly. (:issue:`5940`) +- Improve performance of ``CustomBusinessDay`` (:issue:`6584`) Performance ~~~~~~~~~~~ diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 299d532c20b08..eb40f1f520cff 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -453,32 +453,38 @@ class CustomBusinessDay(BusinessDay): _prefix = 'C' def __init__(self, n=1, **kwds): - # Check we have the required numpy version - from distutils.version import LooseVersion - - if LooseVersion(np.__version__) < '1.7.0': - raise NotImplementedError("CustomBusinessDay requires numpy >= " - "1.7.0. Current version: " + - np.__version__) - self.n = int(n) self.kwds = kwds self.offset = kwds.get('offset', timedelta(0)) self.normalize = kwds.get('normalize', False) self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri') - holidays = kwds.get('holidays', []) + holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in holidays] self.holidays = tuple(sorted(holidays)) self.kwds['holidays'] = self.holidays + self._set_busdaycalendar() def _set_busdaycalendar(self): - holidays = np.array(self.holidays, dtype='datetime64[D]') - self.busdaycalendar = np.busdaycalendar(holidays=holidays, - weekmask=self.weekmask) - + if self.holidays: + kwargs = {'weekmask':self.weekmask,'holidays':self.holidays} + else: + kwargs = {'weekmask':self.weekmask} + try: + self.busdaycalendar = np.busdaycalendar(**kwargs) + except: + # Check we have the required numpy version + from distutils.version import LooseVersion + + if LooseVersion(np.__version__) < '1.7.0': + raise NotImplementedError("CustomBusinessDay requires numpy >= " + 
"1.7.0. Current version: " + + np.__version__) + else: + raise + def __getstate__(self): """"Return a pickleable state""" state = self.__dict__.copy() @@ -490,52 +496,71 @@ def __setstate__(self, state): self.__dict__ = state self._set_busdaycalendar() - @staticmethod - def _to_dt64(dt, dtype='datetime64'): - if isinstance(dt, (datetime, compat.string_types)): - dt = np.datetime64(dt, dtype=dtype) - if isinstance(dt, np.datetime64): - dt = dt.astype(dtype) + def apply(self, other): + if self.n <= 0: + roll = 'forward' else: - raise TypeError('dt must be datestring, datetime or datetime64') - return dt + roll = 'backward' - def apply(self, other): + # Distinguish input cases to enhance performance if isinstance(other, datetime): dtype = type(other) + date_in = other + np_dt = np.datetime64(date_in.date()) + + np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll, + busdaycal=self.busdaycalendar) + + dt_date = np_incr_dt.astype(datetime) + if not self.normalize: + result = datetime.combine(dt_date,date_in.time()) + else: + result = dt_date + + if self.offset: + result = result + self.offset + + return result + elif isinstance(other, np.datetime64): dtype = other.dtype + date_in = other + np_day = date_in.astype('datetime64[D]') + np_time = date_in - np_day + + np_incr_dt = np.busday_offset(np_day, self.n, roll=roll, + busdaycal=self.busdaycalendar) + + if not self.normalize: + result = np_day_incr + np_time + else: + result = np_incr_dt + + if self.offset: + result = result + self.offset + + return result + elif isinstance(other, (timedelta, Tick)): return BDay(self.n, offset=self.offset + other, normalize=self.normalize) else: raise ApplyTypeError('Only know how to combine trading day with ' 'datetime, datetime64 or timedelta.') - dt64 = self._to_dt64(other) - - day64 = dt64.astype('datetime64[D]') - time = dt64 - day64 - - if self.n <= 0: - roll = 'forward' - else: - roll = 'backward' - - result = np.busday_offset(day64, self.n, roll=roll, - 
busdaycal=self.busdaycalendar) - if not self.normalize: - result = result + time - - result = result.astype(dtype) - - if self.offset: - result = result + self.offset - - return result + @staticmethod + def _to_dt64(dt, dtype='datetime64'): + # Currently + # > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]') + # numpy.datetime64('2013-05-01T02:00:00.000000+0200') + # Thus astype is needed to cast datetime to datetime64[D] + dt = np.datetime64(dt) + if dt.dtype.name != dtype: + dt = dt.astype(dtype) + return dt def onOffset(self, dt): - day64 = self._to_dt64(dt).astype('datetime64[D]') + day64 = self._to_dt64(dt,'datetime64[D]') return np.is_busday(day64, busdaycal=self.busdaycalendar) diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py index 93821c3be3c2c..fafa7f75501d9 100644 --- a/vb_suite/timeseries.py +++ b/vb_suite/timeseries.py @@ -281,3 +281,17 @@ def date_range(start=None, end=None, periods=None, freq=None): Benchmark('DatetimeConverter.convert(rng, None, None)', setup, start_date=datetime(2013, 1, 1)) +# Adding custom business day +setup = common_setup + """ +import datetime as dt +import pandas as pd + +date = dt.datetime(2011,1,1) +cday = pd.offsets.CustomBusinessDay() +""" +timeseries_custom_bday_incr = \ + Benchmark("date + cday",setup) + +# Increment by n +timeseries_custom_bday_incr_n = \ + Benchmark("date + 10 * cday",setup)
closes #6584 Tests are passing. Incrementing datetime with custom dateoffset is at about twice the speed now. This could probably be optimized more.
https://api.github.com/repos/pandas-dev/pandas/pulls/6592
2014-03-10T22:48:22Z
2014-03-18T10:08:23Z
2014-03-18T10:08:23Z
2014-06-13T22:07:35Z
BUG: replace iterrows with itertuples in sql insert (GH6509)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 0e6924e4b0122..c42c9920efef1 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -227,6 +227,8 @@ Bug Fixes - Series.quantile raising on an ``object`` dtype (:issue:`6555`) - Bug in ``.xs`` with a ``nan`` in level when dropped (:issue:`6574`) - Bug in fillna with method = 'bfill/ffill' and ``datetime64[ns]`` dtype (:issue:`6587`) +- Bug in sql writing with mixed dtypes possibly leading to data loss (:issue:`6509`) + pandas 0.13.1 ------------- diff --git a/pandas/io/sql.py b/pandas/io/sql.py index cddcb4d72373b..4c0c18a0e7bd0 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -423,16 +423,17 @@ def insert(self): ins = self.insert_statement() data_list = [] # to avoid if check for every row + keys = self.frame.columns if self.index is not None: - for t in self.frame.iterrows(): + for t in self.frame.itertuples(): data = dict((k, self.maybe_asscalar(v)) - for k, v in t[1].iteritems()) + for k, v in zip(keys, t[1:])) data[self.index] = self.maybe_asscalar(t[0]) data_list.append(data) else: - for t in self.frame.iterrows(): + for t in self.frame.itertuples(): data = dict((k, self.maybe_asscalar(v)) - for k, v in t[1].iteritems()) + for k, v in zip(keys, t[1:])) data_list.append(data) self.pd_sql.execute(ins, data_list) @@ -758,8 +759,8 @@ def insert_statement(self): def insert(self): ins = self.insert_statement() cur = self.pd_sql.con.cursor() - for r in self.frame.iterrows(): - data = [self.maybe_asscalar(v) for v in r[1].values] + for r in self.frame.itertuples(): + data = [self.maybe_asscalar(v) for v in r[1:]] if self.index is not None: data.insert(0, self.maybe_asscalar(r[0])) cur.execute(ins, tuple(data)) diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 2be086cddf7c4..0e26a66921df4 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -7,7 +7,7 @@ import nose import numpy as np -from pandas import DataFrame +from pandas 
import DataFrame, Series from pandas.compat import range, lrange, iteritems #from pandas.core.datetools import format as date_format @@ -554,6 +554,18 @@ def test_date_parsing(self): self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") + def test_mixed_dtype_insert(self): + # see GH6509 + s1 = Series(2**25 + 1,dtype=np.int32) + s2 = Series(0.0,dtype=np.float32) + df = DataFrame({'s1': s1, 's2': s2}) + + # write and read again + df.to_sql("test_read_write", self.conn, index=False) + df2 = sql.read_table("test_read_write", self.conn) + + tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True) + class TestSQLAlchemy(_TestSQLAlchemy): """ diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 007dc8af5ed12..a0876179ee4af 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -499,12 +499,18 @@ def is_sorted(seq): def assert_series_equal(left, right, check_dtype=True, check_index_type=False, check_series_type=False, - check_less_precise=False): + check_less_precise=False, + check_exact=False): if check_series_type: assert_isinstance(left, type(right)) if check_dtype: assert_attr_equal('dtype', left, right) - assert_almost_equal(left.values, right.values, check_less_precise) + if check_exact: + if not np.array_equal(left.values, right.values): + raise AssertionError('{0} is not equal to {1}.'.format(left.values, + right.values)) + else: + assert_almost_equal(left.values, right.values, check_less_precise) if check_less_precise: assert_almost_equal( left.index.values, right.index.values, check_less_precise) @@ -522,7 +528,8 @@ def assert_frame_equal(left, right, check_dtype=True, check_frame_type=False, check_less_precise=False, check_names=True, - by_blocks=False): + by_blocks=False, + check_exact=False): if check_frame_type: assert_isinstance(left, type(right)) assert_isinstance(left, DataFrame) @@ -555,7 +562,8 @@ def assert_frame_equal(left, right, check_dtype=True, 
assert_series_equal(lcol, rcol, check_dtype=check_dtype, check_index_type=check_index_type, - check_less_precise=check_less_precise) + check_less_precise=check_less_precise, + check_exact=check_exact) if check_index_type: assert_isinstance(left.index, type(right.index))
Fixes #6509. Should still add a test
https://api.github.com/repos/pandas-dev/pandas/pulls/6591
2014-03-10T20:57:44Z
2014-03-11T12:05:51Z
2014-03-11T12:05:51Z
2014-06-20T10:46:15Z