title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
ENH: non-nano Timestamp.timestamp, to_period | diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd
index 8cc7bcb2a1aad..833ba4ce70bd7 100644
--- a/pandas/_libs/tslibs/dtypes.pxd
+++ b/pandas/_libs/tslibs/dtypes.pxd
@@ -6,6 +6,7 @@ from pandas._libs.tslibs.np_datetime cimport NPY_DATETIMEUNIT
cdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit)
cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) nogil
cdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=*) except? -1
+cdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1
cdef dict attrname_to_abbrevs
diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index 0c4d4c5c235b5..3be21ba754f27 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -348,6 +348,19 @@ cdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns) e
return day_units
+cdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1:
+ if reso == NPY_DATETIMEUNIT.NPY_FR_ns:
+ return 1_000_000_000
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_us:
+ return 1_000_000
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_ms:
+ return 1_000
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_s:
+ return 1
+ else:
+ raise NotImplementedError(reso)
+
+
cdef dict _reso_str_map = {
Resolution.RESO_NS.value: "nanosecond",
Resolution.RESO_US.value: "microsecond",
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 923d1f830e1a9..fcc9390a2cccd 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -52,7 +52,11 @@ from pandas._libs.tslibs.conversion cimport (
convert_datetime_to_tsobject,
convert_to_tsobject,
)
-from pandas._libs.tslibs.dtypes cimport npy_unit_to_abbrev
+from pandas._libs.tslibs.dtypes cimport (
+ npy_unit_to_abbrev,
+ periods_per_day,
+ periods_per_second,
+)
from pandas._libs.tslibs.util cimport (
is_array,
is_datetime64_object,
@@ -811,11 +815,12 @@ cdef class _Timestamp(ABCTimestamp):
cdef:
local_val = self._maybe_convert_value_to_local()
int64_t normalized
+ int64_t ppd = periods_per_day(self._reso)
if self._reso != NPY_FR_ns:
raise NotImplementedError(self._reso)
- normalized = normalize_i8_stamp(local_val)
+ normalized = normalize_i8_stamp(local_val, ppd)
return Timestamp(normalized).tz_localize(self.tzinfo)
# -----------------------------------------------------------------
@@ -834,8 +839,8 @@ cdef class _Timestamp(ABCTimestamp):
if len(state) == 3:
# pre-non-nano pickle
+ # TODO: no tests get here 2022-05-10
reso = NPY_FR_ns
- assert False # checking for coverage
else:
reso = state[4]
self._reso = reso
@@ -982,10 +987,10 @@ cdef class _Timestamp(ABCTimestamp):
"""
# GH 17329
# Note: Naive timestamps will not match datetime.stdlib
- if self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
- return round(self.value / 1e9, 6)
+ denom = periods_per_second(self._reso)
+
+ return round(self.value / denom, 6)
cpdef datetime to_pydatetime(_Timestamp self, bint warn=True):
"""
@@ -1080,9 +1085,6 @@ cdef class _Timestamp(ABCTimestamp):
"""
from pandas import Period
- if self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
-
if self.tz is not None:
# GH#21333
warnings.warn(
@@ -2252,16 +2254,18 @@ Timestamp.resolution = Timedelta(nanoseconds=1) # GH#21336, GH#21365
@cython.cdivision(False)
-cdef inline int64_t normalize_i8_stamp(int64_t local_val) nogil:
+cdef inline int64_t normalize_i8_stamp(int64_t local_val, int64_t ppd) nogil:
"""
Round the localized nanosecond timestamp down to the previous midnight.
Parameters
----------
local_val : int64_t
+ ppd : int64_t
+ Periods per day in the Timestamp's resolution.
Returns
-------
int64_t
"""
- return local_val - (local_val % ccalendar.DAY_NANOS)
+ return local_val - (local_val % ppd)
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index c892816629462..108d58bcc251d 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -840,3 +840,11 @@ def test_to_datetime64(self, dt64, ts):
res = ts.to_datetime64()
assert res == dt64
assert res.dtype == dt64.dtype
+
+ def test_timestamp(self, dt64, ts):
+ alt = Timestamp(dt64)
+ assert ts.timestamp() == alt.timestamp()
+
+ def test_to_period(self, dt64, ts):
+ alt = Timestamp(dt64)
+ assert ts.to_period("D") == alt.to_period("D")
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46990 | 2022-05-10T21:55:56Z | 2022-05-11T01:58:59Z | 2022-05-11T01:58:59Z | 2022-05-11T15:10:30Z |
TST: Assign back multiple column to datetime | diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index c61f3c028f129..539b56667ee07 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -21,11 +21,13 @@
Interval,
NaT,
Series,
+ Timestamp,
array,
concat,
date_range,
interval_range,
isna,
+ to_datetime,
)
import pandas._testing as tm
from pandas.api.types import is_scalar
@@ -1196,6 +1198,23 @@ def test_iloc_getitem_int_single_ea_block_view(self):
arr[2] = arr[-1]
assert ser[0] == arr[-1]
+ def test_iloc_setitem_multicolumn_to_datetime(self, using_array_manager):
+
+ # GH#20511
+ df = DataFrame({"A": ["2022-01-01", "2022-01-02"], "B": ["2021", "2022"]})
+
+ df.iloc[:, [0]] = DataFrame({"A": to_datetime(["2021", "2022"])})
+ expected = DataFrame(
+ {
+ "A": [
+ Timestamp("2021-01-01 00:00:00"),
+ Timestamp("2022-01-01 00:00:00"),
+ ],
+ "B": ["2021", "2022"],
+ }
+ )
+ tm.assert_frame_equal(df, expected, check_dtype=using_array_manager)
+
class TestILocErrors:
# NB: this test should work for _any_ Series we can pass as
| This tests make sure when converting multiple columns to datetimes
and when assiging back it remains as datetime not as unix date
as mentioned in GH #20511.
- [x] closes #20511
- [x] [Tests added and passed]
- [x] All [code checks passed]
| https://api.github.com/repos/pandas-dev/pandas/pulls/46982 | 2022-05-10T06:24:57Z | 2022-06-05T23:44:41Z | 2022-06-05T23:44:41Z | 2022-06-05T23:44:46Z |
CI: Move MacOS build from Azure to GHA | diff --git a/.github/workflows/windows.yml b/.github/workflows/macos-windows.yml
similarity index 70%
rename from .github/workflows/windows.yml
rename to .github/workflows/macos-windows.yml
index 6f267357554a3..560a421ec74ec 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/macos-windows.yml
@@ -1,4 +1,4 @@
-name: Windows
+name: Windows-MacOS
on:
push:
@@ -21,18 +21,20 @@ env:
jobs:
pytest:
- runs-on: windows-latest
defaults:
run:
shell: bash -el {0}
timeout-minutes: 90
strategy:
matrix:
+ os: [macos-latest, windows-latest]
env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml]
fail-fast: false
+ runs-on: ${{ matrix.os }}
+ name: ${{ format('{0} {1}', matrix.os, matrix.env_file) }}
concurrency:
# https://github.community/t/concurrecy-not-work-for-push/183068/7
- group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-windows
+ group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-${{ matrix.os }}
cancel-in-progress: true
steps:
@@ -47,10 +49,17 @@ jobs:
mamba-version: "*"
channels: conda-forge
activate-environment: pandas-dev
- channel-priority: strict
+ channel-priority: ${{ matrix.os == 'macos-latest' && 'flexible' || 'strict' }}
environment-file: ci/deps/${{ matrix.env_file }}
use-only-tar-bz2: true
+ # ImportError: 2): Library not loaded: @rpath/libssl.1.1.dylib
+ # Referenced from: /Users/runner/miniconda3/envs/pandas-dev/lib/libthrift.0.13.0.dylib
+ # Reason: image not found
+ - name: Upgrade pyarrow on MacOS
+ run: conda install -n pandas-dev -c conda-forge --no-update-deps pyarrow=6
+ if: ${{ matrix.os == 'macos-latest' }}
+
- name: Build Pandas
uses: ./.github/actions/build_pandas
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index b2ae620019962..37df662df8edc 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -22,11 +22,6 @@ variables:
PANDAS_CI: 1
jobs:
-- template: ci/azure/posix.yml
- parameters:
- name: macOS
- vmImage: macOS-10.15
-
- job: py38_32bit
pool:
vmImage: ubuntu-18.04
diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml
deleted file mode 100644
index df1d5049be33d..0000000000000
--- a/ci/azure/posix.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-parameters:
- name: ''
- vmImage: ''
-
-jobs:
-- job: ${{ parameters.name }}
- timeoutInMinutes: 90
- pool:
- vmImage: ${{ parameters.vmImage }}
- strategy:
- matrix:
- py38:
- ENV_FILE: ci/deps/actions-38.yaml
- CONDA_PY: "38"
-
- py39:
- ENV_FILE: ci/deps/actions-39.yaml
- CONDA_PY: "39"
-
- py310:
- ENV_FILE: ci/deps/actions-310.yaml
- CONDA_PY: "310"
-
- steps:
- - script: echo '##vso[task.prependpath]$(HOME)/miniconda3/bin'
- displayName: 'Set conda path'
-
- - script: rm /usr/local/miniconda/pkgs/cache/*.json
- displayName: 'Workaround for mamba-org/mamba#488'
-
- - script: ci/setup_env.sh
- displayName: 'Setup environment and build pandas'
-
- - script: |
- conda run -n pandas-dev --no-capture-output ci/run_tests.sh
- displayName: 'Test'
-
- - script: |
- pushd /tmp
- conda run -n pandas-dev python -c "import pandas; pandas.show_versions()"
- popd
- displayName: 'Build versions'
-
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- failTaskOnFailedTests: true
- testResultsFiles: 'test-data.xml'
- testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }}
- displayName: 'Publish test results'
diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index a85767eb6f1b4..483353cfcb3cd 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -73,15 +73,6 @@ mamba install -n pandas-dev 'setuptools<60'
echo "conda list -n pandas-dev"
conda list -n pandas-dev
-# From pyarrow on MacOS
-# ImportError: 2): Library not loaded: @rpath/libssl.1.1.dylib
-# Referenced from: /Users/runner/miniconda3/envs/pandas-dev/lib/libthrift.0.13.0.dylib
-# Reason: image not found
-if [[ "$(uname)" == 'Darwin' ]]; then
- echo "Update pyarrow for pyarrow on MacOS"
- conda install -n pandas-dev -c conda-forge --no-update-deps pyarrow=6
-fi
-
if [[ "$BITS32" == "yes" ]]; then
# activate 32-bit compiler
export CONDA_BUILD=1
| - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Similar to https://github.com/pandas-dev/pandas/pull/46960
| https://api.github.com/repos/pandas-dev/pandas/pulls/46981 | 2022-05-10T02:38:18Z | 2022-05-12T15:54:27Z | 2022-05-12T15:54:27Z | 2022-05-13T16:00:47Z |
TST: avoid chained assignment in tests outside of specific tests on chaining | diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
index daddca7891b93..47ebca0b9bf5c 100644
--- a/pandas/tests/frame/methods/test_combine_first.py
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -66,7 +66,7 @@ def test_combine_first(self, float_frame):
assert (combined["A"][:10] == 1).all()
# reverse overlap
- tail["A"][:10] = 0
+ tail.iloc[:10, tail.columns.get_loc("A")] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index 2f0a4195d2f74..ee9af3f436943 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -27,8 +27,8 @@ def test_cov(self, float_frame, float_string_frame):
# with NAs
frame = float_frame.copy()
- frame["A"][:5] = np.nan
- frame["B"][5:10] = np.nan
+ frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
+ frame.iloc[5:10, frame.columns.get_loc("B")] = np.nan
result = frame.cov(min_periods=len(frame) - 8)
expected = frame.cov()
expected.loc["A", "B"] = np.nan
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 7c33242192d2e..3e22734992d23 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -1333,7 +1333,8 @@ def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
- frame_copy["C"][:5] = np.nan
+ # adding NAs to first 5 values of column "C"
+ frame_copy.loc[: frame_copy.index[4], "C"] = np.nan
added = float_frame + frame_copy
diff --git a/pandas/tests/groupby/test_apply_mutate.py b/pandas/tests/groupby/test_apply_mutate.py
index 36e117cf03353..d1f25aabe31a2 100644
--- a/pandas/tests/groupby/test_apply_mutate.py
+++ b/pandas/tests/groupby/test_apply_mutate.py
@@ -72,7 +72,7 @@ def test_apply_function_with_indexing():
)
def fn(x):
- x.col2[x.index[-1]] = 0
+ x.loc[x.index[-1], "col2"] = 0
return x.col2
result = df.groupby(["col1"], as_index=False).apply(fn)
diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py
index 9d5e65e692fdc..a57b363c0a448 100644
--- a/pandas/tests/indexing/multiindex/test_partial.py
+++ b/pandas/tests/indexing/multiindex/test_partial.py
@@ -128,7 +128,7 @@ def test_partial_set(self, multiindex_year_month_day_dataframe_random_data):
df = ymd.copy()
exp = ymd.copy()
df.loc[2000, 4] = 0
- exp.loc[2000, 4].values[:] = 0
+ exp.iloc[65:85] = 0
tm.assert_frame_equal(df, exp)
df["A"].loc[2000, 4] = 1
@@ -136,7 +136,7 @@ def test_partial_set(self, multiindex_year_month_day_dataframe_random_data):
tm.assert_frame_equal(df, exp)
df.loc[2000] = 5
- exp.loc[2000].values[:] = 5
+ exp.iloc[:100] = 5
tm.assert_frame_equal(df, exp)
# this works...for now
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 3f8e4401808b7..1cd96bff4177d 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -53,7 +53,7 @@
def test_not_change_nan_loc(series, new_series, expected_ser):
# GH 28403
df = DataFrame({"A": series})
- df["A"].loc[:] = new_series
+ df.loc[:, "A"] = new_series
expected = DataFrame({"A": expected_ser})
tm.assert_frame_equal(df.isna(), expected)
tm.assert_frame_equal(df.notna(), ~expected)
| A small part of the test changes from #46958 that can be done separately.
We have specific tests about chained indexing (eg `test_chaining_and_caching.py`), so outside those specific tests, I can think we can avoid using chained indexing (regardless of #46958, this would follow our own recommendation on best indexing practices, although it also shows that some cases of mixed positional/label based setting is somewhat convoluted) | https://api.github.com/repos/pandas-dev/pandas/pulls/46980 | 2022-05-09T23:03:27Z | 2022-05-10T00:07:13Z | 2022-05-10T00:07:13Z | 2022-05-10T08:16:05Z |
API/TST: add tests for new copy/view behaviour | diff --git a/pandas/tests/copy_view/__init__.py b/pandas/tests/copy_view/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py
new file mode 100644
index 0000000000000..16cd72cc1cb06
--- /dev/null
+++ b/pandas/tests/copy_view/test_indexing.py
@@ -0,0 +1,649 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Series,
+)
+import pandas._testing as tm
+import pandas.core.common as com
+from pandas.tests.copy_view.util import get_array
+
+# -----------------------------------------------------------------------------
+# Indexing operations taking subset + modifying the subset/parent
+
+
+def test_subset_column_selection(using_copy_on_write):
+ # Case: taking a subset of the columns of a DataFrame
+ # + afterwards modifying the subset
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+
+ subset = df[["a", "c"]]
+
+ if using_copy_on_write:
+ # the subset shares memory ...
+ assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+ # ... but uses CoW when being modified
+ subset.iloc[0, 0] = 0
+ else:
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+ # INFO this no longer raise warning since pandas 1.4
+ # with pd.option_context("chained_assignment", "warn"):
+ # with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ subset.iloc[0, 0] = 0
+
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+
+ expected = DataFrame({"a": [0, 2, 3], "c": [0.1, 0.2, 0.3]})
+ tm.assert_frame_equal(subset, expected)
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_subset_column_selection_modify_parent(using_copy_on_write):
+ # Case: taking a subset of the columns of a DataFrame
+ # + afterwards modifying the parent
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+
+ subset = df[["a", "c"]]
+ if using_copy_on_write:
+ # the subset shares memory ...
+ assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+ # ... but parent uses CoW parent when it is modified
+ df.iloc[0, 0] = 0
+
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+ if using_copy_on_write:
+ # different column/block still shares memory
+ assert np.shares_memory(get_array(subset, "c"), get_array(df, "c"))
+
+ expected = DataFrame({"a": [1, 2, 3], "c": [0.1, 0.2, 0.3]})
+ tm.assert_frame_equal(subset, expected)
+
+
+def test_subset_row_slice(using_copy_on_write):
+ # Case: taking a subset of the rows of a DataFrame using a slice
+ # + afterwards modifying the subset
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+
+ subset = df[1:3]
+ subset._mgr._verify_integrity()
+
+ assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+
+ if using_copy_on_write:
+ subset.iloc[0, 0] = 0
+ assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+
+ else:
+ # INFO this no longer raise warning since pandas 1.4
+ # with pd.option_context("chained_assignment", "warn"):
+ # with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ subset.iloc[0, 0] = 0
+
+ subset._mgr._verify_integrity()
+
+ expected = DataFrame({"a": [0, 3], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3))
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write:
+ # original parent dataframe is not modified (CoW)
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # original parent dataframe is actually updated
+ df_orig.iloc[1, 0] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
+)
+def test_subset_column_slice(using_copy_on_write, using_array_manager, dtype):
+ # Case: taking a subset of the columns of a DataFrame using a slice
+ # + afterwards modifying the subset
+ single_block = (dtype == "int64") and not using_array_manager
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
+ )
+ df_orig = df.copy()
+
+ subset = df.iloc[:, 1:]
+ subset._mgr._verify_integrity()
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(subset, "b"), get_array(df, "b"))
+
+ subset.iloc[0, 0] = 0
+ assert not np.shares_memory(get_array(subset, "b"), get_array(df, "b"))
+
+ else:
+ # we only get a warning in case of a single block
+ warn = com.SettingWithCopyWarning if single_block else None
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(warn):
+ subset.iloc[0, 0] = 0
+
+ expected = DataFrame({"b": [0, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)})
+ tm.assert_frame_equal(subset, expected)
+ # original parent dataframe is not modified (also not for BlockManager case,
+ # except for single block)
+ if not using_copy_on_write and (using_array_manager or single_block):
+ df_orig.iloc[0, 1] = 0
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
+)
+@pytest.mark.parametrize(
+ "row_indexer",
+ [slice(1, 2), np.array([False, True, True]), np.array([1, 2])],
+ ids=["slice", "mask", "array"],
+)
+@pytest.mark.parametrize(
+ "column_indexer",
+ [slice("b", "c"), np.array([False, True, True]), ["b", "c"]],
+ ids=["slice", "mask", "array"],
+)
+def test_subset_loc_rows_columns(
+ dtype, row_indexer, column_indexer, using_array_manager
+):
+ # Case: taking a subset of the rows+columns of a DataFrame using .loc
+ # + afterwards modifying the subset
+ # Generic test for several combinations of row/column indexers, not all
+ # of those could actually return a view / need CoW (so this test is not
+ # checking memory sharing, only ensuring subsequent mutation doesn't
+ # affect the parent dataframe)
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
+ )
+ df_orig = df.copy()
+
+ subset = df.loc[row_indexer, column_indexer]
+
+ # modifying the subset never modifies the parent
+ subset.iloc[0, 0] = 0
+
+ expected = DataFrame(
+ {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3)
+ )
+ tm.assert_frame_equal(subset, expected)
+ # a few corner cases _do_ actually modify the parent (with both row and column
+ # slice, and in case of ArrayManager or BlockManager with single block)
+ if (
+ isinstance(row_indexer, slice)
+ and isinstance(column_indexer, slice)
+ and (using_array_manager or dtype == "int64")
+ ):
+ df_orig.iloc[1, 1] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
+)
+@pytest.mark.parametrize(
+ "row_indexer",
+ [slice(1, 3), np.array([False, True, True]), np.array([1, 2])],
+ ids=["slice", "mask", "array"],
+)
+@pytest.mark.parametrize(
+ "column_indexer",
+ [slice(1, 3), np.array([False, True, True]), [1, 2]],
+ ids=["slice", "mask", "array"],
+)
+def test_subset_iloc_rows_columns(
+ dtype, row_indexer, column_indexer, using_array_manager
+):
+ # Case: taking a subset of the rows+columns of a DataFrame using .iloc
+ # + afterwards modifying the subset
+ # Generic test for several combinations of row/column indexers, not all
+ # of those could actually return a view / need CoW (so this test is not
+ # checking memory sharing, only ensuring subsequent mutation doesn't
+ # affect the parent dataframe)
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
+ )
+ df_orig = df.copy()
+
+ subset = df.iloc[row_indexer, column_indexer]
+
+ # modifying the subset never modifies the parent
+ subset.iloc[0, 0] = 0
+
+ expected = DataFrame(
+ {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3)
+ )
+ tm.assert_frame_equal(subset, expected)
+ # a few corner cases _do_ actually modify the parent (with both row and column
+ # slice, and in case of ArrayManager or BlockManager with single block)
+ if (
+ isinstance(row_indexer, slice)
+ and isinstance(column_indexer, slice)
+ and (using_array_manager or dtype == "int64")
+ ):
+ df_orig.iloc[1, 1] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "indexer",
+ [slice(0, 2), np.array([True, True, False]), np.array([0, 1])],
+ ids=["slice", "mask", "array"],
+)
+def test_subset_set_with_row_indexer(indexer_si, indexer, using_copy_on_write):
+ # Case: setting values with a row indexer on a viewing subset
+ # subset[indexer] = value and subset.iloc[indexer] = value
+ df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})
+ df_orig = df.copy()
+ subset = df[1:4]
+
+ if (
+ indexer_si is tm.setitem
+ and isinstance(indexer, np.ndarray)
+ and indexer.dtype == "int"
+ ):
+ pytest.skip("setitem with labels selects on columns")
+
+ if using_copy_on_write:
+ indexer_si(subset)[indexer] = 0
+ else:
+ # INFO iloc no longer raises warning since pandas 1.4
+ warn = com.SettingWithCopyWarning if indexer_si is tm.setitem else None
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(warn):
+ indexer_si(subset)[indexer] = 0
+
+ expected = DataFrame(
+ {"a": [0, 0, 4], "b": [0, 0, 7], "c": [0.0, 0.0, 0.4]}, index=range(1, 4)
+ )
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write:
+ # original parent dataframe is not modified (CoW)
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # original parent dataframe is actually updated
+ df_orig[1:3] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_subset_set_with_mask(using_copy_on_write):
+ # Case: setting values with a mask on a viewing subset: subset[mask] = value
+ df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})
+ df_orig = df.copy()
+ subset = df[1:4]
+
+ mask = subset > 3
+
+ if using_copy_on_write:
+ subset[mask] = 0
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ subset[mask] = 0
+
+ expected = DataFrame(
+ {"a": [2, 3, 0], "b": [0, 0, 0], "c": [0.20, 0.3, 0.4]}, index=range(1, 4)
+ )
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write:
+ # original parent dataframe is not modified (CoW)
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # original parent dataframe is actually updated
+ df_orig.loc[3, "a"] = 0
+ df_orig.loc[1:3, "b"] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_subset_set_column(using_copy_on_write):
+ # Case: setting a single column on a viewing subset -> subset[col] = value
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+ subset = df[1:3]
+
+ if using_copy_on_write:
+ subset["a"] = np.array([10, 11], dtype="int64")
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ subset["a"] = np.array([10, 11], dtype="int64")
+
+ subset._mgr._verify_integrity()
+ expected = DataFrame(
+ {"a": [10, 11], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3)
+ )
+ tm.assert_frame_equal(subset, expected)
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
+)
+def test_subset_set_column_with_loc(using_copy_on_write, using_array_manager, dtype):
+ # Case: setting a single column with loc on a viewing subset
+ # -> subset.loc[:, col] = value
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
+ )
+ df_orig = df.copy()
+ subset = df[1:3]
+
+ if using_copy_on_write:
+ subset.loc[:, "a"] = np.array([10, 11], dtype="int64")
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ # The (i)loc[:, col] inplace deprecation gets triggered here, ignore those
+ # warnings and only assert the SettingWithCopyWarning
+ raise_on_extra_warnings = False if using_array_manager else True
+ with tm.assert_produces_warning(
+ com.SettingWithCopyWarning,
+ raise_on_extra_warnings=raise_on_extra_warnings,
+ ):
+ subset.loc[:, "a"] = np.array([10, 11], dtype="int64")
+
+ subset._mgr._verify_integrity()
+ expected = DataFrame(
+ {"a": [10, 11], "b": [5, 6], "c": np.array([8, 9], dtype=dtype)},
+ index=range(1, 3),
+ )
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write or using_array_manager:
+ # original parent dataframe is not modified (CoW)
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # original parent dataframe is actually updated
+ df_orig.loc[1:3, "a"] = np.array([10, 11], dtype="int64")
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_subset_set_column_with_loc2(using_copy_on_write, using_array_manager):
+ # Case: setting a single column with loc on a viewing subset
+ # -> subset.loc[:, col] = value
+ # separate test for case of DataFrame of a single column -> takes a separate
+ # code path
+ df = DataFrame({"a": [1, 2, 3]})
+ df_orig = df.copy()
+ subset = df[1:3]
+
+ if using_copy_on_write:
+ subset.loc[:, "a"] = 0
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ # The (i)loc[:, col] inplace deprecation gets triggered here, ignore those
+ # warnings and only assert the SettingWithCopyWarning
+ raise_on_extra_warnings = False if using_array_manager else True
+ with tm.assert_produces_warning(
+ com.SettingWithCopyWarning,
+ raise_on_extra_warnings=raise_on_extra_warnings,
+ ):
+ subset.loc[:, "a"] = 0
+
+ subset._mgr._verify_integrity()
+ expected = DataFrame({"a": [0, 0]}, index=range(1, 3))
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write or using_array_manager:
+ # original parent dataframe is not modified (CoW)
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # original parent dataframe is actually updated
+ df_orig.loc[1:3, "a"] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
+)
+def test_subset_set_columns(using_copy_on_write, dtype):
+ # Case: setting multiple columns on a viewing subset
+ # -> subset[[col1, col2]] = value
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
+ )
+ df_orig = df.copy()
+ subset = df[1:3]
+
+ if using_copy_on_write:
+ subset[["a", "c"]] = 0
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ subset[["a", "c"]] = 0
+
+ subset._mgr._verify_integrity()
+ if using_copy_on_write:
+ # first and third column should certainly have no references anymore
+ assert all(subset._mgr._has_no_reference(i) for i in [0, 2])
+ expected = DataFrame({"a": [0, 0], "b": [5, 6], "c": [0, 0]}, index=range(1, 3))
+ tm.assert_frame_equal(subset, expected)
+ tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+ "indexer",
+ [slice("a", "b"), np.array([True, True, False]), ["a", "b"]],
+ ids=["slice", "mask", "array"],
+)
+def test_subset_set_with_column_indexer(
+ indexer, using_copy_on_write, using_array_manager
+):
+ # Case: setting multiple columns with a column indexer on a viewing subset
+ # -> subset.loc[:, [col1, col2]] = value
+ df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
+ df_orig = df.copy()
+ subset = df[1:3]
+
+ if using_copy_on_write:
+ subset.loc[:, indexer] = 0
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ # The (i)loc[:, col] inplace deprecation gets triggered here, ignore those
+ # warnings and only assert the SettingWithCopyWarning
+ with tm.assert_produces_warning(
+ com.SettingWithCopyWarning, raise_on_extra_warnings=False
+ ):
+ subset.loc[:, indexer] = 0
+
+ subset._mgr._verify_integrity()
+ expected = DataFrame({"a": [0, 0], "b": [0.0, 0.0], "c": [5, 6]}, index=range(1, 3))
+ # TODO full row slice .loc[:, idx] update inplace instead of overwrite?
+ expected["b"] = expected["b"].astype("int64")
+ tm.assert_frame_equal(subset, expected)
+ if using_copy_on_write or using_array_manager:
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # In the mixed case with BlockManager, only one of the two columns is
+ # mutated in the parent frame ..
+ df_orig.loc[1:2, ["a"]] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+# TODO add more tests modifying the parent
+
+
+# -----------------------------------------------------------------------------
+# Series -- Indexing operations taking subset + modifying the subset/parent
+
+
+def test_series_getitem_slice(using_copy_on_write):
+ # Case: taking a slice of a Series + afterwards modifying the subset
+ s = Series([1, 2, 3], index=["a", "b", "c"])
+ s_orig = s.copy()
+
+ subset = s[:]
+ assert np.shares_memory(subset.values, s.values)
+
+ subset.iloc[0] = 0
+
+ if using_copy_on_write:
+ assert not np.shares_memory(subset.values, s.values)
+
+ expected = Series([0, 2, 3], index=["a", "b", "c"])
+ tm.assert_series_equal(subset, expected)
+
+ if using_copy_on_write:
+ # original parent series is not modified (CoW)
+ tm.assert_series_equal(s, s_orig)
+ else:
+ # original parent series is actually updated
+ assert s.iloc[0] == 0
+
+
+@pytest.mark.parametrize(
+ "indexer",
+ [slice(0, 2), np.array([True, True, False]), np.array([0, 1])],
+ ids=["slice", "mask", "array"],
+)
+def test_series_subset_set_with_indexer(indexer_si, indexer, using_copy_on_write):
+ # Case: setting values in a viewing Series with an indexer
+ s = Series([1, 2, 3], index=["a", "b", "c"])
+ s_orig = s.copy()
+ subset = s[:]
+
+ indexer_si(subset)[indexer] = 0
+ expected = Series([0, 0, 3], index=["a", "b", "c"])
+ tm.assert_series_equal(subset, expected)
+
+ if using_copy_on_write:
+ tm.assert_series_equal(s, s_orig)
+ else:
+ tm.assert_series_equal(s, expected)
+
+
+# -----------------------------------------------------------------------------
+# del operator
+
+
+def test_del_frame(using_copy_on_write):
+ # Case: deleting a column with `del` on a viewing child dataframe should
+ # not modify parent + update the references
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+ df2 = df[:]
+
+ assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+
+ del df2["b"]
+
+ assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+ tm.assert_frame_equal(df, df_orig)
+ tm.assert_frame_equal(df2, df_orig[["a", "c"]])
+ df2._mgr._verify_integrity()
+
+ # TODO in theory modifying column "b" of the parent wouldn't need a CoW
+ # but the weakref is still alive and so we still perform CoW
+
+ df2.loc[0, "a"] = 100
+ if using_copy_on_write:
+ # modifying child after deleting a column still doesn't update parent
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ assert df.loc[0, "a"] == 100
+
+
+def test_del_series():
+ s = Series([1, 2, 3], index=["a", "b", "c"])
+ s_orig = s.copy()
+ s2 = s[:]
+
+ assert np.shares_memory(s.values, s2.values)
+
+ del s2["a"]
+
+ assert not np.shares_memory(s.values, s2.values)
+ tm.assert_series_equal(s, s_orig)
+ tm.assert_series_equal(s2, s_orig[["b", "c"]])
+
+ # modifying s2 doesn't need copy on write (due to `del`, s2 is backed by new array)
+ values = s2.values
+ s2.loc["b"] = 100
+ assert values[0] == 100
+
+
+# -----------------------------------------------------------------------------
+# Accessing column as Series
+
+
+def test_column_as_series(using_copy_on_write, using_array_manager):
+ # Case: selecting a single column now also uses Copy-on-Write
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+
+ s = df["a"]
+
+ assert np.shares_memory(s.values, get_array(df, "a"))
+
+ if using_copy_on_write or using_array_manager:
+ s[0] = 0
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ s[0] = 0
+
+ expected = Series([0, 2, 3], name="a")
+ tm.assert_series_equal(s, expected)
+ if using_copy_on_write:
+ # assert not np.shares_memory(s.values, get_array(df, "a"))
+ tm.assert_frame_equal(df, df_orig)
+ # ensure cached series on getitem is not the changed series
+ tm.assert_series_equal(df["a"], df_orig["a"])
+ else:
+ df_orig.iloc[0, 0] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_column_as_series_set_with_upcast(using_copy_on_write, using_array_manager):
+ # Case: selecting a single column now also uses Copy-on-Write -> when
+ # setting a value causes an upcast, we don't need to update the parent
+ # DataFrame through the cache mechanism
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+
+ s = df["a"]
+ if using_copy_on_write or using_array_manager:
+ s[0] = "foo"
+ else:
+ with pd.option_context("chained_assignment", "warn"):
+ with tm.assert_produces_warning(com.SettingWithCopyWarning):
+ s[0] = "foo"
+
+ expected = Series(["foo", 2, 3], dtype=object, name="a")
+ tm.assert_series_equal(s, expected)
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, df_orig)
+ # ensure cached series on getitem is not the changed series
+ tm.assert_series_equal(df["a"], df_orig["a"])
+ else:
+ df_orig["a"] = expected
+ tm.assert_frame_equal(df, df_orig)
+
+
+# TODO add tests for other indexing methods on the Series
+
+
+def test_dataframe_add_column_from_series():
+ # Case: adding a new column to a DataFrame from an existing column/series
+ # -> always already takes a copy on assignment
+ # (no change in behaviour here)
+ # TODO can we achieve the same behaviour with Copy-on-Write?
+ df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
+
+ s = Series([10, 11, 12])
+ df["new"] = s
+ assert not np.shares_memory(get_array(df, "new"), s.values)
+
+ # editing series -> doesn't modify column in frame
+ s[0] = 0
+ expected = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "new": [10, 11, 12]})
+ tm.assert_frame_equal(df, expected)
+
+ # editing column in frame -> doesn't modify series
+ df.loc[2, "new"] = 100
+ expected_s = Series([0, 11, 12])
+ tm.assert_series_equal(s, expected_s)
+
+
+# TODO add tests for constructors
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
new file mode 100644
index 0000000000000..1ed458e95b78e
--- /dev/null
+++ b/pandas/tests/copy_view/test_methods.py
@@ -0,0 +1,128 @@
+import numpy as np
+
+from pandas import DataFrame
+import pandas._testing as tm
+from pandas.tests.copy_view.util import get_array
+
+
+def test_copy(using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_copy = df.copy()
+
+ # the deep copy doesn't share memory
+ assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
+ if using_copy_on_write:
+ assert df_copy._mgr.refs is None
+
+ # mutating copy doesn't mutate original
+ df_copy.iloc[0, 0] = 0
+ assert df.iloc[0, 0] == 1
+
+
+def test_copy_shallow(using_copy_on_write):
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_copy = df.copy(deep=False)
+
+ # the shallow copy still shares memory
+ assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
+ if using_copy_on_write:
+ assert df_copy._mgr.refs is not None
+
+ if using_copy_on_write:
+ # mutating shallow copy doesn't mutate original
+ df_copy.iloc[0, 0] = 0
+ assert df.iloc[0, 0] == 1
+ # mutating triggered a copy-on-write -> no longer shares memory
+ assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
+ # but still shares memory for the other columns/blocks
+ assert np.shares_memory(get_array(df_copy, "c"), get_array(df, "c"))
+ else:
+ # mutating shallow copy does mutate original
+ df_copy.iloc[0, 0] = 0
+ assert df.iloc[0, 0] == 0
+ # and still shares memory
+ assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
+
+
+# -----------------------------------------------------------------------------
+# DataFrame methods returning new DataFrame using shallow copy
+
+
+def test_reset_index(using_copy_on_write):
+ # Case: resetting the index (i.e. adding a new column) + mutating the
+ # resulting dataframe
+ df = DataFrame(
+ {"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=[10, 11, 12]
+ )
+ df_orig = df.copy()
+ df2 = df.reset_index()
+ df2._mgr._verify_integrity()
+
+ if using_copy_on_write:
+ # still shares memory (df2 is a shallow copy)
+ assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+ assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+ # mutating df2 triggers a copy-on-write for that column / block
+ df2.iloc[0, 2] = 0
+ assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_rename_columns(using_copy_on_write):
+ # Case: renaming columns returns a new dataframe
+ # + afterwards modifying the result
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+ df2 = df.rename(columns=str.upper)
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
+ df2.iloc[0, 0] = 0
+ assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "C"), get_array(df, "c"))
+ expected = DataFrame({"A": [0, 2, 3], "B": [4, 5, 6], "C": [0.1, 0.2, 0.3]})
+ tm.assert_frame_equal(df2, expected)
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_rename_columns_modify_parent(using_copy_on_write):
+ # Case: renaming columns returns a new dataframe
+ # + afterwards modifying the original (parent) dataframe
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df2 = df.rename(columns=str.upper)
+ df2_orig = df2.copy()
+
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
+ else:
+ assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
+ df.iloc[0, 0] = 0
+ assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "C"), get_array(df, "c"))
+ expected = DataFrame({"a": [0, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ tm.assert_frame_equal(df, expected)
+ tm.assert_frame_equal(df2, df2_orig)
+
+
+def test_reindex_columns(using_copy_on_write):
+ # Case: reindexing the column returns a new dataframe
+ # + afterwards modifying the result
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+ df2 = df.reindex(columns=["a", "c"])
+
+ if using_copy_on_write:
+ # still shares memory (df2 is a shallow copy)
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ else:
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ # mutating df2 triggers a copy-on-write for that column
+ df2.iloc[0, 0] = 0
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ if using_copy_on_write:
+ assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+ tm.assert_frame_equal(df, df_orig)
diff --git a/pandas/tests/copy_view/util.py b/pandas/tests/copy_view/util.py
new file mode 100644
index 0000000000000..9e358c7eec749
--- /dev/null
+++ b/pandas/tests/copy_view/util.py
@@ -0,0 +1,11 @@
+def get_array(df, col):
+ """
+ Helper method to get array for a DataFrame column.
+
+ Equivalent of df[col].values, but without going through normal getitem,
+ which triggers tracking references / CoW (and we might be testing that
+ this is done by some other operation).
+ """
+ icol = df.columns.get_loc(col)
+ assert isinstance(icol, int)
+ return df._get_column_array(icol)
| This is broken off from https://github.com/pandas-dev/pandas/pull/46958 / https://github.com/pandas-dev/pandas/pull/41878
This are the _new_ tests that I wrote for the PRs implementing the proposed new copy/view semantics with Copy-on-Write (https://github.com/pandas-dev/pandas/issues/36195). They are intended to be readable tests to follow and review the intended semantics.
They will partly be duplicating some other tests that also deal with copy/view details spread around in the test suite (see the required test edits in the abovementioned PRs), but I would say this is fine, as I think it is good to have a set of centralized tests focused on those copy/view semantics.
I broke of this part from the main PR because I think this part can be done as a pre-cursor (reducing the diff size for the main PR), and because it might be useful to review those tests as a way to review the proposed behaviours (cc @pandas-dev/pandas-core).
Some practical questions about the test organization:
- Does the general style of the tests look OK?
- I currently put those tests in `/tests/indexing/test_copy_on_write.py`, but 1) it's not only about indexing, and 2) "copy_on_write" is more the internal implementation mechanism, while the user facing thing we are testing is copy/view behaviour. So maybe I could put them in a new top-level `tests/copy_view/` test directory? (which also allows more easily to split it in multiple files) | https://api.github.com/repos/pandas-dev/pandas/pulls/46979 | 2022-05-09T22:50:06Z | 2022-05-31T17:01:57Z | 2022-05-31T17:01:57Z | 2022-05-31T22:07:19Z |
follow-up to 44787, use pandas compat for platform specifics in added test | diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index 35749aabdc39f..98e136a9c4ba6 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -11,6 +11,8 @@
import pytest
+from pandas.compat import is_platform_windows
+
import pandas as pd
import pandas._testing as tm
@@ -325,5 +327,10 @@ def test_tar_gz_to_different_filename():
members = archive.getmembers()
assert len(members) == 1
content = archive.extractfile(members[0]).read().decode("utf8")
- content = content.replace("\r\n", "\n") # windows
- assert content == "foo,bar\n1,2\n"
+
+ if is_platform_windows():
+ expected = "foo,bar\r\n1,2\r\n"
+ else:
+ expected = "foo,bar\n1,2\n"
+
+ assert content == expected
| follow-up to #44787, as requested by @mroeschke in https://github.com/pandas-dev/pandas/pull/44787#issuecomment-1120292009.
Updates one added test to use `is_platform_windows()` for dealing with carriage returns. | https://api.github.com/repos/pandas-dev/pandas/pulls/46973 | 2022-05-09T07:30:29Z | 2022-05-09T18:48:15Z | 2022-05-09T18:48:15Z | 2022-05-09T18:48:26Z |
Backport PR #46960 on branch 1.4.x (CI: Move Windows build from Azure to GHA) | diff --git a/.github/actions/build_pandas/action.yml b/.github/actions/build_pandas/action.yml
index e916d5bfde5fb..5e5a3bdf0f024 100644
--- a/.github/actions/build_pandas/action.yml
+++ b/.github/actions/build_pandas/action.yml
@@ -12,6 +12,9 @@ runs:
- name: Build Pandas
run: |
- python setup.py build_ext -j 2
+ python setup.py build_ext -j $N_JOBS
python -m pip install -e . --no-build-isolation --no-use-pep517 --no-index
shell: bash -el {0}
+ env:
+ # Cannot use parallel compilation on Windows, see https://github.com/pandas-dev/pandas/issues/30873
+ N_JOBS: ${{ runner.os == 'Windows' && 1 || 2 }}
diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
index f5cbb0e88ff11..b86dcea59edb8 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/posix.yml
@@ -180,7 +180,6 @@ jobs:
run: ci/run_tests.sh
# TODO: Don't continue on error for PyPy
continue-on-error: ${{ env.IS_PYPY == 'true' }}
- if: always()
- name: Build Version
run: conda list
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
new file mode 100644
index 0000000000000..6f267357554a3
--- /dev/null
+++ b/.github/workflows/windows.yml
@@ -0,0 +1,75 @@
+name: Windows
+
+on:
+ push:
+ branches:
+ - main
+ - 1.4.x
+ pull_request:
+ branches:
+ - main
+ - 1.4.x
+ paths-ignore:
+ - "doc/**"
+
+env:
+ PANDAS_CI: 1
+ PYTEST_TARGET: pandas
+ PYTEST_WORKERS: auto
+ PATTERN: "not slow and not db and not network and not single_cpu"
+
+
+jobs:
+ pytest:
+ runs-on: windows-latest
+ defaults:
+ run:
+ shell: bash -el {0}
+ timeout-minutes: 90
+ strategy:
+ matrix:
+ env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml]
+ fail-fast: false
+ concurrency:
+ # https://github.community/t/concurrecy-not-work-for-push/183068/7
+ group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-windows
+ cancel-in-progress: true
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Install Dependencies
+ uses: conda-incubator/setup-miniconda@v2.1.1
+ with:
+ mamba-version: "*"
+ channels: conda-forge
+ activate-environment: pandas-dev
+ channel-priority: strict
+ environment-file: ci/deps/${{ matrix.env_file }}
+ use-only-tar-bz2: true
+
+ - name: Build Pandas
+ uses: ./.github/actions/build_pandas
+
+ - name: Test
+ run: ci/run_tests.sh
+
+ - name: Build Version
+ run: conda list
+
+ - name: Publish test results
+ uses: actions/upload-artifact@v3
+ with:
+ name: Test results
+ path: test-data.xml
+ if: failure()
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v2
+ with:
+ flags: unittests
+ name: codecov-pandas
+ fail_ci_if_error: false
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index d84f2d7784935..0b2a9f5b2b0cd 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -27,11 +27,6 @@ jobs:
name: macOS
vmImage: macOS-10.15
-- template: ci/azure/windows.yml
- parameters:
- name: Windows
- vmImage: windows-2019
-
- job: py38_32bit
pool:
vmImage: ubuntu-18.04
diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml
deleted file mode 100644
index 02c6564579aa2..0000000000000
--- a/ci/azure/windows.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-parameters:
- name: ''
- vmImage: ''
-
-jobs:
-- job: ${{ parameters.name }}
- timeoutInMinutes: 90
- pool:
- vmImage: ${{ parameters.vmImage }}
- strategy:
- matrix:
- py38:
- ENV_FILE: ci/deps/actions-38.yaml
- CONDA_PY: "38"
-
- py39:
- ENV_FILE: ci/deps/actions-39.yaml
- CONDA_PY: "39"
-
- py310:
- ENV_FILE: ci/deps/actions-310.yaml
- CONDA_PY: "310"
-
- steps:
- - powershell: |
- Write-Host "##vso[task.prependpath]$env:CONDA\Scripts"
- Write-Host "##vso[task.prependpath]$HOME/miniconda3/bin"
- displayName: 'Add conda to PATH'
- - bash: conda install -yv -c conda-forge -n base 'mamba>=0.21.2'
- displayName: 'Install mamba'
-
- - bash: |
- # See https://github.com/mamba-org/mamba/issues/1370
- # See https://github.com/mamba-org/mamba/issues/633
- C:\\Miniconda\\condabin\\mamba.bat create -n pandas-dev
- C:\\Miniconda\\condabin\\mamba.bat env update -n pandas-dev --file ci\\deps\\actions-$(CONDA_PY).yaml
- # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941
- C:\\Miniconda\\condabin\\mamba.bat install -n pandas-dev 'setuptools<60'
- C:\\Miniconda\\condabin\\mamba.bat list -n pandas-dev
- displayName: 'Create anaconda environment'
- - bash: |
- source activate pandas-dev
- conda list
- python setup.py build_ext -q -j 2
- python -m pip install --no-build-isolation -e .
- displayName: 'Build'
- - bash: |
- source activate pandas-dev
- wmic.exe cpu get caption, deviceid, name, numberofcores, maxclockspeed
- ci/run_tests.sh
- displayName: 'Test'
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- failTaskOnFailedTests: true
- testResultsFiles: 'test-data.xml'
- testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }}
- displayName: 'Publish test results'
| Backport PR #46960: CI: Move Windows build from Azure to GHA | https://api.github.com/repos/pandas-dev/pandas/pulls/46971 | 2022-05-08T22:19:42Z | 2022-05-10T00:05:30Z | 2022-05-10T00:05:30Z | 2022-05-10T00:05:30Z |
TYP: enable reportOverlappingOverload | diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index a27ed42c984bf..d4a2bedcfba1a 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -74,7 +74,7 @@ jobs:
- name: Install pyright
# note: keep version in sync with .pre-commit-config.yaml
- run: npm install -g pyright@1.1.230
+ run: npm install -g pyright@1.1.245
- name: Build Pandas
id: build
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bd095c03e6fdb..fac09fcf70511 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -89,7 +89,7 @@ repos:
types: [python]
stages: [manual]
# note: keep version in sync with .github/workflows/code-checks.yml
- additional_dependencies: ['pyright@1.1.230']
+ additional_dependencies: ['pyright@1.1.245']
- repo: local
hooks:
- id: flake8-rst
diff --git a/pandas/_libs/interval.pyi b/pandas/_libs/interval.pyi
index bad6013aa58b6..9b727b6278792 100644
--- a/pandas/_libs/interval.pyi
+++ b/pandas/_libs/interval.pyi
@@ -68,10 +68,12 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
left: _OrderableT,
right: _OrderableT,
closed: IntervalClosedType = ...,
- ): ...
+ ) -> None: ...
def __hash__(self) -> int: ...
@overload
- def __contains__(self: Interval[_OrderableTimesT], _OrderableTimesT) -> bool: ...
+ def __contains__(
+ self: Interval[_OrderableTimesT], key: _OrderableTimesT
+ ) -> bool: ...
@overload
def __contains__(
self: Interval[_OrderableScalarT], key: Union[int, float]
@@ -83,9 +85,9 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
- def __add__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __add__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __add__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __add__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
@@ -93,9 +95,9 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
- def __radd__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __radd__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __radd__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __radd__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
@@ -103,9 +105,9 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
- def __sub__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __sub__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __sub__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __sub__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
@@ -113,33 +115,33 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
- def __rsub__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __rsub__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __rsub__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __rsub__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
- def __mul__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __mul__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __mul__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __mul__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
- def __rmul__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __rmul__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __rmul__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __rmul__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
- def __truediv__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __truediv__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __truediv__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __truediv__(self: Interval[float], y: Union[int, float]) -> Interval[float]: ...
@overload
- def __floordiv__(self: Interval[int], y: int) -> Interval[int]: ...
- @overload
- def __floordiv__(self: Interval[int], y: float) -> Interval[float]: ...
+ def __floordiv__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
@overload
def __floordiv__(
self: Interval[float], y: Union[int, float]
@@ -157,7 +159,7 @@ class IntervalTree(IntervalMixin):
right: np.ndarray,
closed: IntervalClosedType = ...,
leaf_size: int = ...,
- ): ...
+ ) -> None: ...
@property
def mid(self) -> np.ndarray: ...
@property
diff --git a/pyproject.toml b/pyproject.toml
index 7b32c5f8eab49..030e6bc3c470c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -156,6 +156,7 @@ exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version"]
# enable subset of "strict"
reportDuplicateImport = true
reportInvalidStubStatement = true
+reportOverlappingOverload = true
reportPropertyTypeMismatch = true
reportUntypedClassDecorator = true
reportUntypedFunctionDecorator = true
| reportOverlappingOverload needs a newer pyright version (otherwise it has a few false positives). I intended to update pyright together with mypy in #46905 first, but mypy still hasn't been updated on conda-forge. | https://api.github.com/repos/pandas-dev/pandas/pulls/46969 | 2022-05-08T14:33:00Z | 2022-05-09T00:06:39Z | 2022-05-09T00:06:39Z | 2022-05-26T01:59:34Z |
CLN: mmap used by only read_csv | diff --git a/pandas/io/common.py b/pandas/io/common.py
index 15a8f2e114041..fdee1600c2a32 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -640,7 +640,7 @@ def get_handle(
.. versionchanged:: 1.4.0 Zstandard support.
memory_map : bool, default False
- See parsers._parser_params for more information.
+ See parsers._parser_params for more information. Only used by read_csv.
is_text : bool, default True
Whether the type of the content passed to the file/buffer is string or
bytes. This is not the same as `"b" not in mode`. If a string content is
@@ -659,6 +659,8 @@ def get_handle(
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
encoding = encoding or "utf-8"
+ errors = errors or "strict"
+
# read_csv does not know whether the buffer is opened in binary/text mode
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
mode += "b"
@@ -681,6 +683,7 @@ def get_handle(
handles: list[BaseBuffer]
# memory mapping needs to be the first step
+ # only used for read_csv
handle, memory_map, handles = _maybe_memory_map(
handle,
memory_map,
@@ -1064,7 +1067,7 @@ def closed(self):
return self.fp is None
-class _MMapWrapper(abc.Iterator):
+class _CSVMMapWrapper(abc.Iterator):
"""
Wrapper for the Python's mmap class so that it can be properly read in
by Python's csv.reader class.
@@ -1079,7 +1082,7 @@ class _MMapWrapper(abc.Iterator):
def __init__(
self,
- f: IO,
+ f: ReadBuffer[bytes],
encoding: str = "utf-8",
errors: str = "strict",
decode: bool = True,
@@ -1089,11 +1092,13 @@ def __init__(
self.decoder = codecs.getincrementaldecoder(encoding)(errors=errors)
self.decode = decode
+ # needed for compression libraries and TextIOWrapper
self.attributes = {}
for attribute in ("seekable", "readable"):
if not hasattr(f, attribute):
continue
self.attributes[attribute] = getattr(f, attribute)()
+
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __getattr__(self, name: str):
@@ -1101,7 +1106,7 @@ def __getattr__(self, name: str):
return lambda: self.attributes[name]
return getattr(self.mmap, name)
- def __iter__(self) -> _MMapWrapper:
+ def __iter__(self) -> _CSVMMapWrapper:
return self
def read(self, size: int = -1) -> str | bytes:
@@ -1196,7 +1201,7 @@ def _maybe_memory_map(
memory_map: bool,
encoding: str,
mode: str,
- errors: str | None,
+ errors: str,
decode: bool,
) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]:
"""Try to memory map file/buffer."""
@@ -1207,25 +1212,22 @@ def _maybe_memory_map(
# need to open the file first
if isinstance(handle, str):
- if encoding and "b" not in mode:
- # Encoding
- handle = open(handle, mode, encoding=encoding, errors=errors, newline="")
- else:
- # Binary mode
- handle = open(handle, mode)
+ handle = open(handle, "rb")
handles.append(handle)
# error: Argument 1 to "_MMapWrapper" has incompatible type "Union[IO[Any],
# RawIOBase, BufferedIOBase, TextIOBase, mmap]"; expected "IO[Any]"
try:
+ # open mmap, adds *-able, and convert to string
wrapped = cast(
BaseBuffer,
- _MMapWrapper(handle, encoding, errors, decode), # type: ignore[arg-type]
+ _CSVMMapWrapper(handle, encoding, errors, decode), # type: ignore[arg-type]
)
finally:
for handle in reversed(handles):
# error: "BaseBuffer" has no attribute "close"
handle.close() # type: ignore[attr-defined]
+ handles = []
handles.append(wrapped)
return wrapped, memory_map, handles
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index ca6809470b2b1..22399917f2bf7 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -413,18 +413,18 @@ def test_constructor_bad_file(self, mmap_file):
err = mmap.error
with pytest.raises(err, match=msg):
- icom._MMapWrapper(non_file)
+ icom._CSVMMapWrapper(non_file)
with open(mmap_file) as target:
pass
msg = "I/O operation on closed file"
with pytest.raises(ValueError, match=msg):
- icom._MMapWrapper(target)
+ icom._CSVMMapWrapper(target)
def test_get_attr(self, mmap_file):
with open(mmap_file) as target:
- wrapper = icom._MMapWrapper(target)
+ wrapper = icom._CSVMMapWrapper(target)
attrs = dir(wrapper.mmap)
attrs = [attr for attr in attrs if not attr.startswith("__")]
@@ -437,7 +437,7 @@ def test_get_attr(self, mmap_file):
def test_next(self, mmap_file):
with open(mmap_file) as target:
- wrapper = icom._MMapWrapper(target)
+ wrapper = icom._CSVMMapWrapper(target)
lines = target.readlines()
for line in lines:
| Make it clear that mmap is only ever used by `read_csv`. | https://api.github.com/repos/pandas-dev/pandas/pulls/46967 | 2022-05-07T22:10:42Z | 2022-05-09T00:04:45Z | 2022-05-09T00:04:45Z | 2022-05-26T01:59:27Z |
TYP: overload asarray_tuplesafe signature | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 098b501cc95c9..2e8d6dbced4e3 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -225,14 +225,27 @@ def count_not_none(*args) -> int:
return sum(x is not None for x in args)
-def asarray_tuplesafe(values, dtype: NpDtype | None = None) -> np.ndarray:
+@overload
+def asarray_tuplesafe(
+ values: ArrayLike | list | tuple | zip, dtype: NpDtype | None = ...
+) -> np.ndarray:
+ # ExtensionArray can only be returned when values is an Index, all other iterables
+ # will return np.ndarray. Unfortunately "all other" cannot be encoded in a type
+ # signature, so instead we special-case some common types.
+ ...
+
+
+@overload
+def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike:
+ ...
+
+
+def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike:
if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")):
values = list(values)
elif isinstance(values, ABCIndex):
- # error: Incompatible return value type (got "Union[ExtensionArray, ndarray]",
- # expected "ndarray")
- return values._values # type: ignore[return-value]
+ return values._values
if isinstance(values, list) and dtype in [np.object_, object]:
return construct_1d_object_array_from_listlike(values)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 59e55bdcb405a..8ebaaa28e13a5 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -555,10 +555,7 @@ def __new__(
subarr = com.asarray_tuplesafe(data, dtype=_dtype_obj)
if dtype is None:
# with e.g. a list [1, 2, 3] casting to numeric is _not_ deprecated
- # error: Incompatible types in assignment (expression has type
- # "Union[ExtensionArray, ndarray[Any, Any]]", variable has type
- # "ndarray[Any, Any]")
- subarr = _maybe_cast_data_without_dtype( # type: ignore[assignment]
+ subarr = _maybe_cast_data_without_dtype(
subarr, cast_numeric_deprecated=False
)
dtype = subarr.dtype
| xref https://github.com/pandas-dev/pandas/issues/37715 | https://api.github.com/repos/pandas-dev/pandas/pulls/46966 | 2022-05-07T20:08:48Z | 2022-05-09T00:05:36Z | 2022-05-09T00:05:36Z | 2022-05-09T00:05:36Z |
Fix link to be *true* raw data | diff --git a/doc/source/getting_started/intro_tutorials/includes/titanic.rst b/doc/source/getting_started/intro_tutorials/includes/titanic.rst
index 312ca48b45dd1..19b8e81914e31 100644
--- a/doc/source/getting_started/intro_tutorials/includes/titanic.rst
+++ b/doc/source/getting_started/intro_tutorials/includes/titanic.rst
@@ -26,6 +26,6 @@ consists of the following data columns:
.. raw:: html
</p>
- <a href="https://github.com/pandas-dev/pandas/tree/main/doc/data/titanic.csv" class="btn btn-dark btn-sm">To raw data</a>
+ <a href="https://github.com/pandas-dev/pandas/raw/main/doc/data/titanic.csv" class="btn btn-dark btn-sm">To raw data</a>
</div>
</div>
| Which would be handy for `wget`
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46963 | 2022-05-07T08:05:05Z | 2022-05-08T22:18:14Z | 2022-05-08T22:18:14Z | 2022-05-08T22:18:19Z |
CI: Move Windows build from Azure to GHA | diff --git a/.github/actions/build_pandas/action.yml b/.github/actions/build_pandas/action.yml
index e916d5bfde5fb..5e5a3bdf0f024 100644
--- a/.github/actions/build_pandas/action.yml
+++ b/.github/actions/build_pandas/action.yml
@@ -12,6 +12,9 @@ runs:
- name: Build Pandas
run: |
- python setup.py build_ext -j 2
+ python setup.py build_ext -j $N_JOBS
python -m pip install -e . --no-build-isolation --no-use-pep517 --no-index
shell: bash -el {0}
+ env:
+ # Cannot use parallel compilation on Windows, see https://github.com/pandas-dev/pandas/issues/30873
+ N_JOBS: ${{ runner.os == 'Windows' && 1 || 2 }}
diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
index f5cbb0e88ff11..b86dcea59edb8 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/posix.yml
@@ -180,7 +180,6 @@ jobs:
run: ci/run_tests.sh
# TODO: Don't continue on error for PyPy
continue-on-error: ${{ env.IS_PYPY == 'true' }}
- if: always()
- name: Build Version
run: conda list
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
new file mode 100644
index 0000000000000..6f267357554a3
--- /dev/null
+++ b/.github/workflows/windows.yml
@@ -0,0 +1,75 @@
+name: Windows
+
+on:
+ push:
+ branches:
+ - main
+ - 1.4.x
+ pull_request:
+ branches:
+ - main
+ - 1.4.x
+ paths-ignore:
+ - "doc/**"
+
+env:
+ PANDAS_CI: 1
+ PYTEST_TARGET: pandas
+ PYTEST_WORKERS: auto
+ PATTERN: "not slow and not db and not network and not single_cpu"
+
+
+jobs:
+ pytest:
+ runs-on: windows-latest
+ defaults:
+ run:
+ shell: bash -el {0}
+ timeout-minutes: 90
+ strategy:
+ matrix:
+ env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml]
+ fail-fast: false
+ concurrency:
+ # https://github.community/t/concurrecy-not-work-for-push/183068/7
+ group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-windows
+ cancel-in-progress: true
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Install Dependencies
+ uses: conda-incubator/setup-miniconda@v2.1.1
+ with:
+ mamba-version: "*"
+ channels: conda-forge
+ activate-environment: pandas-dev
+ channel-priority: strict
+ environment-file: ci/deps/${{ matrix.env_file }}
+ use-only-tar-bz2: true
+
+ - name: Build Pandas
+ uses: ./.github/actions/build_pandas
+
+ - name: Test
+ run: ci/run_tests.sh
+
+ - name: Build Version
+ run: conda list
+
+ - name: Publish test results
+ uses: actions/upload-artifact@v3
+ with:
+ name: Test results
+ path: test-data.xml
+ if: failure()
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v2
+ with:
+ flags: unittests
+ name: codecov-pandas
+ fail_ci_if_error: false
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index f7c97f0554e0e..b2ae620019962 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -27,11 +27,6 @@ jobs:
name: macOS
vmImage: macOS-10.15
-- template: ci/azure/windows.yml
- parameters:
- name: Windows
- vmImage: windows-2019
-
- job: py38_32bit
pool:
vmImage: ubuntu-18.04
diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml
deleted file mode 100644
index 02c6564579aa2..0000000000000
--- a/ci/azure/windows.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-parameters:
- name: ''
- vmImage: ''
-
-jobs:
-- job: ${{ parameters.name }}
- timeoutInMinutes: 90
- pool:
- vmImage: ${{ parameters.vmImage }}
- strategy:
- matrix:
- py38:
- ENV_FILE: ci/deps/actions-38.yaml
- CONDA_PY: "38"
-
- py39:
- ENV_FILE: ci/deps/actions-39.yaml
- CONDA_PY: "39"
-
- py310:
- ENV_FILE: ci/deps/actions-310.yaml
- CONDA_PY: "310"
-
- steps:
- - powershell: |
- Write-Host "##vso[task.prependpath]$env:CONDA\Scripts"
- Write-Host "##vso[task.prependpath]$HOME/miniconda3/bin"
- displayName: 'Add conda to PATH'
- - bash: conda install -yv -c conda-forge -n base 'mamba>=0.21.2'
- displayName: 'Install mamba'
-
- - bash: |
- # See https://github.com/mamba-org/mamba/issues/1370
- # See https://github.com/mamba-org/mamba/issues/633
- C:\\Miniconda\\condabin\\mamba.bat create -n pandas-dev
- C:\\Miniconda\\condabin\\mamba.bat env update -n pandas-dev --file ci\\deps\\actions-$(CONDA_PY).yaml
- # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941
- C:\\Miniconda\\condabin\\mamba.bat install -n pandas-dev 'setuptools<60'
- C:\\Miniconda\\condabin\\mamba.bat list -n pandas-dev
- displayName: 'Create anaconda environment'
- - bash: |
- source activate pandas-dev
- conda list
- python setup.py build_ext -q -j 2
- python -m pip install --no-build-isolation -e .
- displayName: 'Build'
- - bash: |
- source activate pandas-dev
- wmic.exe cpu get caption, deviceid, name, numberofcores, maxclockspeed
- ci/run_tests.sh
- displayName: 'Test'
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- failTaskOnFailedTests: true
- testResultsFiles: 'test-data.xml'
- testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }}
- displayName: 'Publish test results'
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
Took parts from https://github.com/pandas-dev/pandas/pull/46611
| https://api.github.com/repos/pandas-dev/pandas/pulls/46960 | 2022-05-07T03:54:35Z | 2022-05-08T22:19:03Z | 2022-05-08T22:19:03Z | 2022-05-08T22:22:00Z |
ENH: Timestamp.month_name, day_name support non-nano | diff --git a/asv_bench/benchmarks/tslibs/fields.py b/asv_bench/benchmarks/tslibs/fields.py
index 203afcdaa7378..23ae73811204c 100644
--- a/asv_bench/benchmarks/tslibs/fields.py
+++ b/asv_bench/benchmarks/tslibs/fields.py
@@ -66,9 +66,9 @@ class TimeGetStartEndField:
def setup(self, size, side, period, freqstr, month_kw):
arr = np.random.randint(0, 10, size=size, dtype="i8")
- self.dt64data = arr.view("M8[ns]")
+ self.i8data = arr
self.attrname = f"is_{period}_{side}"
def time_get_start_end_field(self, size, side, period, freqstr, month_kw):
- get_start_end_field(self.dt64data, self.attrname, freqstr, month_kw=month_kw)
+ get_start_end_field(self.i8data, self.attrname, freqstr, month_kw=month_kw)
diff --git a/pandas/_libs/tslibs/fields.pyi b/pandas/_libs/tslibs/fields.pyi
index 228f7dbdf5eac..e404eadf13657 100644
--- a/pandas/_libs/tslibs/fields.pyi
+++ b/pandas/_libs/tslibs/fields.pyi
@@ -10,12 +10,14 @@ def get_date_name_field(
dtindex: npt.NDArray[np.int64], # const int64_t[:]
field: str,
locale: str | None = ...,
+ reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.object_]: ...
def get_start_end_field(
- dt64values: npt.NDArray[np.datetime64],
+ dtindex: npt.NDArray[np.int64],
field: str,
freqstr: str | None = ...,
month_kw: int = ...,
+ reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.bool_]: ...
def get_date_field(
dtindex: npt.NDArray[np.int64], # const int64_t[:]
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index e8980dc1a7553..57d4c27b3337d 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -44,6 +44,7 @@ from pandas._libs.tslibs.ccalendar cimport (
from pandas._libs.tslibs.nattype cimport NPY_NAT
from pandas._libs.tslibs.np_datetime cimport (
NPY_DATETIMEUNIT,
+ NPY_FR_ns,
dt64_to_dtstruct,
get_unit_from_dtype,
npy_datetimestruct,
@@ -139,13 +140,18 @@ def month_position_check(fields, weekdays) -> str | None:
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_date_name_field(const int64_t[:] dtindex, str field, object locale=None):
+def get_date_name_field(
+ const int64_t[:] dtindex,
+ str field,
+ object locale=None,
+ NPY_DATETIMEUNIT reso=NPY_FR_ns,
+):
"""
Given a int64-based datetime index, return array of strings of date
name based on requested field (e.g. day_name)
"""
cdef:
- Py_ssize_t i, count = len(dtindex)
+ Py_ssize_t i, count = dtindex.shape[0]
ndarray[object] out, names
npy_datetimestruct dts
int dow
@@ -163,7 +169,7 @@ def get_date_name_field(const int64_t[:] dtindex, str field, object locale=None)
out[i] = np.nan
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
dow = dayofweek(dts.year, dts.month, dts.day)
out[i] = names[dow].capitalize()
@@ -178,7 +184,7 @@ def get_date_name_field(const int64_t[:] dtindex, str field, object locale=None)
out[i] = np.nan
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
out[i] = names[dts.month].capitalize()
else:
@@ -201,8 +207,13 @@ cdef inline bint _is_on_month(int month, int compare_month, int modby) nogil:
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_start_end_field(ndarray dt64values, str field,
- str freqstr=None, int month_kw=12):
+def get_start_end_field(
+ const int64_t[:] dtindex,
+ str field,
+ str freqstr=None,
+ int month_kw=12,
+ NPY_DATETIMEUNIT reso=NPY_FR_ns,
+):
"""
Given an int64-based datetime index return array of indicators
of whether timestamps are at the start/end of the month/quarter/year
@@ -210,10 +221,11 @@ def get_start_end_field(ndarray dt64values, str field,
Parameters
----------
- dt64values : ndarray[datetime64], any resolution
+ dtindex : ndarray[int64]
field : str
frestr : str or None, default None
month_kw : int, default 12
+ reso : NPY_DATETIMEUNIT, default NPY_FR_ns
Returns
-------
@@ -221,15 +233,13 @@ def get_start_end_field(ndarray dt64values, str field,
"""
cdef:
Py_ssize_t i
- int count = dt64values.size
+ int count = dtindex.shape[0]
bint is_business = 0
int end_month = 12
int start_month = 1
ndarray[int8_t] out
npy_datetimestruct dts
int compare_month, modby
- ndarray dtindex = dt64values.view("i8")
- NPY_DATETIMEUNIT reso = get_unit_from_dtype(dt64values.dtype)
out = np.zeros(count, dtype='int8')
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index e7ac855d6a832..923d1f830e1a9 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -487,7 +487,6 @@ cdef class _Timestamp(ABCTimestamp):
dict kwds
ndarray[uint8_t, cast=True] out
int month_kw
- str unit
if freq:
kwds = freq.kwds
@@ -499,9 +498,8 @@ cdef class _Timestamp(ABCTimestamp):
val = self._maybe_convert_value_to_local()
- unit = npy_unit_to_abbrev(self._reso)
- out = get_start_end_field(np.array([val], dtype=f"M8[{unit}]"),
- field, freqstr, month_kw)
+ out = get_start_end_field(np.array([val], dtype=np.int64),
+ field, freqstr, month_kw, self._reso)
return out[0]
cdef _warn_on_field_deprecation(self, freq, str field):
@@ -661,12 +659,10 @@ cdef class _Timestamp(ABCTimestamp):
int64_t val
object[::1] out
- if self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
-
val = self._maybe_convert_value_to_local()
+
out = get_date_name_field(np.array([val], dtype=np.int64),
- field, locale=locale)
+ field, locale=locale, reso=self._reso)
return out[0]
def day_name(self, locale=None) -> str:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 7fef934a85626..6f984727f4f6d 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -38,11 +38,13 @@
tz_convert_from_utc,
tzconversion,
)
+from pandas._libs.tslibs.np_datetime import py_get_unit_from_dtype
from pandas._typing import npt
from pandas.errors import (
OutOfBoundsDatetime,
PerformanceWarning,
)
+from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_inclusive
@@ -131,7 +133,7 @@ def f(self):
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
- values.view(self._ndarray.dtype), field, self.freqstr, month_kw
+ values, field, self.freqstr, month_kw, reso=self._reso
)
else:
result = fields.get_date_field(values, field)
@@ -140,7 +142,7 @@ def f(self):
return result
if field in self._object_ops:
- result = fields.get_date_name_field(values, field)
+ result = fields.get_date_name_field(values, field, reso=self._reso)
result = self._maybe_mask_results(result, fill_value=None)
else:
@@ -544,6 +546,10 @@ def _check_compatible_with(self, other, setitem: bool = False):
# -----------------------------------------------------------------
# Descriptive Properties
+ @cache_readonly
+ def _reso(self):
+ return py_get_unit_from_dtype(self._ndarray.dtype)
+
def _box_func(self, x: np.datetime64) -> Timestamp | NaTType:
# GH#42228
value = x.view("i8")
@@ -1270,7 +1276,9 @@ def month_name(self, locale=None):
"""
values = self._local_timestamps()
- result = fields.get_date_name_field(values, "month_name", locale=locale)
+ result = fields.get_date_name_field(
+ values, "month_name", locale=locale, reso=self._reso
+ )
result = self._maybe_mask_results(result, fill_value=None)
return result
@@ -1313,7 +1321,9 @@ def day_name(self, locale=None):
"""
values = self._local_timestamps()
- result = fields.get_date_name_field(values, "day_name", locale=locale)
+ result = fields.get_date_name_field(
+ values, "day_name", locale=locale, reso=self._reso
+ )
result = self._maybe_mask_results(result, fill_value=None)
return result
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index bc9e6c0131646..c892816629462 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -753,6 +753,14 @@ def test_start_end_fields(self, ts):
assert not ts.is_month_end
assert not ts.is_month_end
+ def test_day_name(self, dt64, ts):
+ alt = Timestamp(dt64)
+ assert ts.day_name() == alt.day_name()
+
+ def test_month_name(self, dt64, ts):
+ alt = Timestamp(dt64)
+ assert ts.month_name() == alt.month_name()
+
def test_repr(self, dt64, ts):
alt = Timestamp(dt64)
diff --git a/pandas/tests/tslibs/test_fields.py b/pandas/tests/tslibs/test_fields.py
index 528d08d7f499b..9e6464f7727bd 100644
--- a/pandas/tests/tslibs/test_fields.py
+++ b/pandas/tests/tslibs/test_fields.py
@@ -28,10 +28,7 @@ def test_get_date_field_readonly(dtindex):
def test_get_start_end_field_readonly(dtindex):
- dt64values = dtindex.view("M8[ns]")
- dt64values.flags.writeable = False
-
- result = fields.get_start_end_field(dt64values, "is_month_start", None)
+ result = fields.get_start_end_field(dtindex, "is_month_start", None)
expected = np.array([True, False, False, False, False], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
| Should also fix an asv that currently fails when run on e.g. v1.4.0 | https://api.github.com/repos/pandas-dev/pandas/pulls/46959 | 2022-05-06T22:19:01Z | 2022-05-07T02:28:13Z | 2022-05-07T02:28:13Z | 2022-05-07T16:41:02Z |
API: New copy / view semantics using Copy-on-Write | diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index c4398efb12c3d..b8268a82d9b70 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -52,6 +52,10 @@ jobs:
extra_apt: "language-pack-zh-hans"
lang: "zh_CN.utf8"
lc_all: "zh_CN.utf8"
+ - name: "Copy-on-Write"
+ env_file: actions-310.yaml
+ pattern: "not slow and not network and not single_cpu"
+ pandas_copy_on_write: "1"
- name: "Data Manager"
env_file: actions-38.yaml
pattern: "not slow and not network and not single_cpu"
@@ -84,6 +88,7 @@ jobs:
LC_ALL: ${{ matrix.lc_all || '' }}
PANDAS_TESTING_MODE: ${{ matrix.pandas_testing_mode || '' }}
PANDAS_DATA_MANAGER: ${{ matrix.pandas_data_manager || 'block' }}
+ PANDAS_COPY_ON_WRITE: ${{ matrix.pandas_copy_on_write || '0' }}
TEST_ARGS: ${{ matrix.test_args || '' }}
PYTEST_WORKERS: ${{ contains(matrix.pattern, 'not single_cpu') && 'auto' || '1' }}
PYTEST_TARGET: ${{ matrix.pytest_target || 'pandas' }}
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 159fdbc080fb4..94ae4a021da4d 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -1,4 +1,5 @@
from collections import defaultdict
+import weakref
cimport cython
from cpython.slice cimport PySlice_GetIndicesEx
@@ -674,8 +675,9 @@ cdef class BlockManager:
public list axes
public bint _known_consolidated, _is_consolidated
public ndarray _blknos, _blklocs
+ public list refs
- def __cinit__(self, blocks=None, axes=None, verify_integrity=True):
+ def __cinit__(self, blocks=None, axes=None, refs=None, verify_integrity=True):
# None as defaults for unpickling GH#42345
if blocks is None:
# This adds 1-2 microseconds to DataFrame(np.array([]))
@@ -687,6 +689,7 @@ cdef class BlockManager:
self.blocks = blocks
self.axes = axes.copy() # copy to make sure we are not remotely-mutable
+ self.refs = refs
# Populate known_consolidate, blknos, and blklocs lazily
self._known_consolidated = False
@@ -795,12 +798,14 @@ cdef class BlockManager:
ndarray blknos, blklocs
nbs = []
+ nrefs = []
for blk in self.blocks:
nb = blk.getitem_block_index(slobj)
nbs.append(nb)
+ nrefs.append(weakref.ref(blk))
new_axes = [self.axes[0], self.axes[1]._getitem_slice(slobj)]
- mgr = type(self)(tuple(nbs), new_axes, verify_integrity=False)
+ mgr = type(self)(tuple(nbs), new_axes, nrefs, verify_integrity=False)
# We can avoid having to rebuild blklocs/blknos
blklocs = self._blklocs
@@ -813,7 +818,7 @@ cdef class BlockManager:
def get_slice(self, slobj: slice, axis: int = 0) -> BlockManager:
if axis == 0:
- new_blocks = self._slice_take_blocks_ax0(slobj)
+ new_blocks, new_refs = self._slice_take_blocks_ax0(slobj)
elif axis == 1:
return self._get_index_slice(slobj)
else:
@@ -822,4 +827,4 @@ cdef class BlockManager:
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis]._getitem_slice(slobj)
- return type(self)(tuple(new_blocks), new_axes, verify_integrity=False)
+ return type(self)(tuple(new_blocks), new_axes, new_refs, verify_integrity=False)
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index c8db82500d0d6..813e8de72f96e 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -224,7 +224,7 @@ def load_newobj(self):
arr = np.array([], dtype="m8[ns]")
obj = cls.__new__(cls, arr, arr.dtype)
elif cls is BlockManager and not args:
- obj = cls.__new__(cls, (), [], False)
+ obj = cls.__new__(cls, (), [], None, False)
else:
obj = cls.__new__(cls, *args)
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 54c24b4c0b58a..e3909953d8a58 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -29,10 +29,7 @@
from decimal import Decimal
import operator
import os
-from typing import (
- Callable,
- Literal,
-)
+from typing import Callable
from dateutil.tz import (
tzlocal,
@@ -1844,8 +1841,8 @@ def using_array_manager():
@pytest.fixture
-def using_copy_on_write() -> Literal[False]:
+def using_copy_on_write() -> bool:
"""
Fixture to check if Copy-on-Write is enabled.
"""
- return False
+ return pd.options.mode.copy_on_write and pd.options.mode.data_manager == "block"
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 4434ed5a8b5f7..2579f736a9703 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -540,6 +540,26 @@ def use_inf_as_na_cb(key) -> None:
)
+# TODO better name?
+copy_on_write_doc = """
+: bool
+ Use new copy-view behaviour using Copy-on-Write. Defaults to False,
+ unless overridden by the 'PANDAS_COPY_ON_WRITE' environment variable
+ (if set to "1" for True, needs to be set before pandas is imported).
+"""
+
+
+with cf.config_prefix("mode"):
+ cf.register_option(
+ "copy_on_write",
+ # Get the default from an environment variable, if set, otherwise defaults
+ # to False. This environment variable can be set for testing.
+ os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1",
+ copy_on_write_doc,
+ validator=is_bool,
+ )
+
+
# user warnings
chained_assignment = """
: string
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 53fddb032a487..b574dee081c06 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3708,6 +3708,9 @@ def _get_column_array(self, i: int) -> ArrayLike:
"""
Get the values of the i'th column (ndarray or ExtensionArray, as stored
in the Block)
+
+ Warning! The returned array is a view but doesn't handle Copy-on-Write,
+ so this should be used with caution (for read-only purposes).
"""
return self._mgr.iget_values(i)
@@ -3715,6 +3718,9 @@ def _iter_column_arrays(self) -> Iterator[ArrayLike]:
"""
Iterate over the arrays of all columns in order.
This returns the values as stored in the Block (ndarray or ExtensionArray).
+
+ Warning! The returned array is a view but doesn't handle Copy-on-Write,
+ so this should be used with caution (for read-only purposes).
"""
for i in range(len(self.columns)):
yield self._get_column_array(i)
@@ -5145,7 +5151,7 @@ def set_axis(
"labels",
[
("method", None),
- ("copy", True),
+ ("copy", None),
("level", None),
("fill_value", np.nan),
("limit", None),
@@ -5370,7 +5376,7 @@ def rename(
index: Renamer | None = ...,
columns: Renamer | None = ...,
axis: Axis | None = ...,
- copy: bool = ...,
+ copy: bool | None = ...,
inplace: Literal[True],
level: Level = ...,
errors: IgnoreRaise = ...,
@@ -5385,7 +5391,7 @@ def rename(
index: Renamer | None = ...,
columns: Renamer | None = ...,
axis: Axis | None = ...,
- copy: bool = ...,
+ copy: bool | None = ...,
inplace: Literal[False] = ...,
level: Level = ...,
errors: IgnoreRaise = ...,
@@ -5400,7 +5406,7 @@ def rename(
index: Renamer | None = ...,
columns: Renamer | None = ...,
axis: Axis | None = ...,
- copy: bool = ...,
+ copy: bool | None = ...,
inplace: bool = ...,
level: Level = ...,
errors: IgnoreRaise = ...,
@@ -5414,7 +5420,7 @@ def rename(
index: Renamer | None = None,
columns: Renamer | None = None,
axis: Axis | None = None,
- copy: bool = True,
+ copy: bool | None = None,
inplace: bool = False,
level: Level = None,
errors: IgnoreRaise = "ignore",
@@ -6288,7 +6294,7 @@ class max type
if inplace:
new_obj = self
else:
- new_obj = self.copy()
+ new_obj = self.copy(deep=None)
if allow_duplicates is not lib.no_default:
allow_duplicates = validate_bool_kwarg(allow_duplicates, "allow_duplicates")
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 7ed6e0d84445c..6ab0b03f570c1 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1034,7 +1034,7 @@ def _rename(
index: Renamer | None = None,
columns: Renamer | None = None,
axis: Axis | None = None,
- copy: bool_t = True,
+ copy: bool_t | None = None,
inplace: bool_t = False,
level: Level | None = None,
errors: str = "ignore",
@@ -4145,6 +4145,12 @@ def _check_setitem_copy(self, t="setting", force=False):
df.iloc[0:5]['group'] = 'a'
"""
+ if (
+ config.get_option("mode.copy_on_write")
+ and config.get_option("mode.data_manager") == "block"
+ ):
+ return
+
# return early if the check is not needed
if not (force or self._is_copy):
return
@@ -5245,7 +5251,7 @@ def reindex(self: NDFrameT, *args, **kwargs) -> NDFrameT:
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
- copy = kwargs.pop("copy", True)
+ copy = kwargs.pop("copy", None)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
@@ -5270,9 +5276,7 @@ def reindex(self: NDFrameT, *args, **kwargs) -> NDFrameT:
for axis, ax in axes.items()
if ax is not None
):
- if copy:
- return self.copy()
- return self
+ return self.copy(deep=copy)
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
@@ -6249,7 +6253,7 @@ def astype(
return cast(NDFrameT, result)
@final
- def copy(self: NDFrameT, deep: bool_t = True) -> NDFrameT:
+ def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT:
"""
Make a copy of this object's indices and data.
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index 88f81064b826f..dcf69dfda1ae8 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -527,6 +527,11 @@ def copy(self: T, deep=True) -> T:
-------
BlockManager
"""
+ if deep is None:
+ # ArrayManager does not yet support CoW, so deep=None always means
+ # deep=True for now
+ deep = True
+
# this preserves the notion of view copying of axes
if deep:
# hit in e.g. tests.io.json.test_pandas
@@ -591,6 +596,11 @@ def _reindex_indexer(
pandas-indexer with -1's only.
"""
+ if copy is None:
+ # ArrayManager does not yet support CoW, so deep=None always means
+ # deep=True for now
+ copy = True
+
if indexer is None:
if new_axis is self._axes[axis] and not copy:
return self
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 3e27cf0b15511..522fe3cb192ec 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -839,10 +839,13 @@ def _slice(
return self.values[slicer]
- def set_inplace(self, locs, values: ArrayLike) -> None:
+ def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None:
"""
Modify block values in-place with new item value.
+ If copy=True, first copy the underlying values in place before modifying
+ (for Copy-on-Write).
+
Notes
-----
`set_inplace` never creates a new array or new Block, whereas `setitem`
@@ -850,6 +853,8 @@ def set_inplace(self, locs, values: ArrayLike) -> None:
Caller is responsible for checking values.dtype == self.dtype.
"""
+ if copy:
+ self.values = self.values.copy()
self.values[locs] = values
def take_nd(
@@ -1665,9 +1670,11 @@ def iget(self, i: int | tuple[int, int] | tuple[slice, int]):
raise IndexError(f"{self} only contains one item")
return self.values
- def set_inplace(self, locs, values: ArrayLike) -> None:
+ def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None:
# When an ndarray, we should have locs.tolist() == [0]
# When a BlockPlacement we should have list(locs) == [0]
+ if copy:
+ self.values = self.values.copy()
self.values[:] = values
def _maybe_squeeze_arg(self, arg):
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 4e84b013b2a11..3084bcea49f05 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -12,9 +12,12 @@
cast,
)
import warnings
+import weakref
import numpy as np
+from pandas._config import get_option
+
from pandas._libs import (
algos as libalgos,
internals as libinternals,
@@ -143,6 +146,7 @@ class BaseBlockManager(DataManager):
_blklocs: npt.NDArray[np.intp]
blocks: tuple[Block, ...]
axes: list[Index]
+ refs: list[weakref.ref | None] | None
@property
def ndim(self) -> int:
@@ -151,11 +155,16 @@ def ndim(self) -> int:
_known_consolidated: bool
_is_consolidated: bool
- def __init__(self, blocks, axes, verify_integrity: bool = True) -> None:
+ def __init__(self, blocks, axes, refs=None, verify_integrity: bool = True) -> None:
raise NotImplementedError
@classmethod
- def from_blocks(cls: type_t[T], blocks: list[Block], axes: list[Index]) -> T:
+ def from_blocks(
+ cls: type_t[T],
+ blocks: list[Block],
+ axes: list[Index],
+ refs: list[weakref.ref | None] | None = None,
+ ) -> T:
raise NotImplementedError
@property
@@ -228,6 +237,33 @@ def is_single_block(self) -> bool:
def items(self) -> Index:
return self.axes[0]
+ def _has_no_reference(self, i: int) -> bool:
+ """
+ Check for column `i` if it has references.
+ (whether it references another array or is itself being referenced)
+ Returns True if the column has no references.
+ """
+ blkno = self.blknos[i]
+ return self._has_no_reference_block(blkno)
+
+ def _has_no_reference_block(self, blkno: int) -> bool:
+ """
+ Check for block `i` if it has references.
+ (whether it references another array or is itself being referenced)
+ Returns True if the block has no references.
+ """
+ # TODO(CoW) include `or self.refs[blkno]() is None` ?
+ return (
+ self.refs is None or self.refs[blkno] is None
+ ) and weakref.getweakrefcount(self.blocks[blkno]) == 0
+
+ def _clear_reference_block(self, blkno: int) -> None:
+ """
+ Clear any reference for column `i`.
+ """
+ if self.refs is not None:
+ self.refs[blkno] = None
+
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return dtypes.take(self.blknos)
@@ -240,6 +276,9 @@ def arrays(self) -> list[ArrayLike]:
Only for compatibility with ArrayManager for testing convenience.
Not to be used in actual code, and return value is not the same as the
ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs).
+
+ Warning! The returned arrays don't handle Copy-on-Write, so this should
+ be used with caution (only in read-mode).
"""
return [blk.values for blk in self.blocks]
@@ -342,9 +381,23 @@ def setitem(self: T, indexer, value) -> T:
if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:
raise ValueError(f"Cannot set values with ndim > {self.ndim}")
+ if _using_copy_on_write() and not self._has_no_reference(0):
+ # if being referenced -> perform Copy-on-Write and clear the reference
+ # this method is only called if there is a single block -> hardcoded 0
+ self = self.copy()
+
return self.apply("setitem", indexer=indexer, value=value)
def putmask(self, mask, new, align: bool = True):
+ if (
+ _using_copy_on_write()
+ and self.refs is not None
+ and not all(ref is None for ref in self.refs)
+ ):
+ # some reference -> copy full dataframe
+ # TODO(CoW) this could be optimized to only copy the blocks that would
+ # get modified
+ self = self.copy()
if align:
align_keys = ["new", "mask"]
@@ -378,6 +431,12 @@ def fillna(self: T, value, limit, inplace: bool, downcast) -> T:
if limit is not None:
# Do this validation even if we go through one of the no-op paths
limit = libalgos.validate_limit(None, limit=limit)
+ if inplace:
+ # TODO(CoW) can be optimized to only copy those blocks that have refs
+ if _using_copy_on_write() and any(
+ not self._has_no_reference_block(i) for i in range(len(self.blocks))
+ ):
+ self = self.copy()
return self.apply(
"fillna", value=value, limit=limit, inplace=inplace, downcast=downcast
@@ -527,17 +586,24 @@ def _combine(
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_blocks: list[Block] = []
+ # TODO(CoW) we could optimize here if we know that the passed blocks
+ # are fully "owned" (eg created from an operation, not coming from
+ # an existing manager)
+ new_refs: list[weakref.ref | None] | None = None if copy else []
for b in blocks:
- b = b.copy(deep=copy)
- b.mgr_locs = BlockPlacement(inv_indexer[b.mgr_locs.indexer])
- new_blocks.append(b)
+ nb = b.copy(deep=copy)
+ nb.mgr_locs = BlockPlacement(inv_indexer[nb.mgr_locs.indexer])
+ new_blocks.append(nb)
+ if not copy:
+ # None has no attribute "append"
+ new_refs.append(weakref.ref(b)) # type: ignore[union-attr]
axes = list(self.axes)
if index is not None:
axes[-1] = index
axes[0] = self.items.take(indexer)
- return type(self).from_blocks(new_blocks, axes)
+ return type(self).from_blocks(new_blocks, axes, new_refs)
@property
def nblocks(self) -> int:
@@ -549,14 +615,22 @@ def copy(self: T, deep=True) -> T:
Parameters
----------
- deep : bool or string, default True
- If False, return shallow copy (do not copy data)
+ deep : bool, string or None, default True
+            If False, return a shallow copy (do not copy data); if None, a
+            shallow copy is used only when Copy-on-Write is enabled,
+            otherwise a deep copy is made
If 'all', copy data and a deep copy of the index
Returns
-------
BlockManager
"""
+ if deep is None:
+ if _using_copy_on_write():
+ # use shallow copy
+ deep = False
+ else:
+ # preserve deep copy for BlockManager with copy=None
+ deep = True
+
# this preserves the notion of view copying of axes
if deep:
# hit in e.g. tests.io.json.test_pandas
@@ -569,8 +643,14 @@ def copy_func(ax):
new_axes = list(self.axes)
res = self.apply("copy", deep=deep)
+ new_refs: list[weakref.ref | None] | None
+ if deep:
+ new_refs = None
+ else:
+ new_refs = [weakref.ref(blk) for blk in self.blocks]
res.axes = new_axes
+ res.refs = new_refs
if self.ndim > 1:
# Avoid needing to re-compute these
@@ -594,7 +674,7 @@ def consolidate(self: T) -> T:
if self.is_consolidated():
return self
- bm = type(self)(self.blocks, self.axes, verify_integrity=False)
+ bm = type(self)(self.blocks, self.axes, self.refs, verify_integrity=False)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
@@ -606,7 +686,7 @@ def reindex_indexer(
axis: int,
fill_value=None,
allow_dups: bool = False,
- copy: bool = True,
+ copy: bool | None = True,
only_slice: bool = False,
*,
use_na_proxy: bool = False,
@@ -619,7 +699,8 @@ def reindex_indexer(
axis : int
fill_value : object, default None
allow_dups : bool, default False
- copy : bool, default True
+ copy : bool or None, default True
+            If None, regarded as False (shallow copy) when Copy-on-Write is
+            enabled, otherwise as True (copy).
only_slice : bool, default False
Whether to take views, not copies, along columns.
use_na_proxy : bool, default False
@@ -627,6 +708,14 @@ def reindex_indexer(
pandas-indexer with -1's only.
"""
+ if copy is None:
+ if _using_copy_on_write():
+ # use shallow copy
+ copy = False
+ else:
+ # preserve deep copy for BlockManager with copy=None
+ copy = True
+
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
@@ -644,7 +733,7 @@ def reindex_indexer(
raise IndexError("Requested axis not found in manager")
if axis == 0:
- new_blocks = self._slice_take_blocks_ax0(
+ new_blocks, new_refs = self._slice_take_blocks_ax0(
indexer,
fill_value=fill_value,
only_slice=only_slice,
@@ -661,11 +750,12 @@ def reindex_indexer(
)
for blk in self.blocks
]
+ new_refs = None
new_axes = list(self.axes)
new_axes[axis] = new_axis
- new_mgr = type(self).from_blocks(new_blocks, new_axes)
+ new_mgr = type(self).from_blocks(new_blocks, new_axes, new_refs)
if axis == 1:
# We can avoid the need to rebuild these
new_mgr._blknos = self.blknos.copy()
@@ -679,7 +769,7 @@ def _slice_take_blocks_ax0(
only_slice: bool = False,
*,
use_na_proxy: bool = False,
- ) -> list[Block]:
+ ) -> tuple[list[Block], list[weakref.ref | None]]:
"""
Slice/take blocks along axis=0.
@@ -712,9 +802,11 @@ def _slice_take_blocks_ax0(
# GH#32959 EABlock would fail since we can't make 0-width
# TODO(EA2D): special casing unnecessary with 2D EAs
if sllen == 0:
- return []
+ return [], []
bp = BlockPlacement(slice(0, sllen))
- return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)]
+ return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)], [
+ weakref.ref(blk)
+ ]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_value is None:
fill_value = blk.fill_value
@@ -730,7 +822,7 @@ def _slice_take_blocks_ax0(
]
# We have
# all(np.shares_memory(nb.values, blk.values) for nb in blocks)
- return blocks
+ return blocks, [weakref.ref(blk)] * len(blocks)
else:
bp = BlockPlacement(slice(0, sllen))
return [
@@ -740,7 +832,7 @@ def _slice_take_blocks_ax0(
new_mgr_locs=bp,
fill_value=fill_value,
)
- ]
+ ], [None]
if sl_type == "slice":
blknos = self.blknos[slobj]
@@ -756,6 +848,7 @@ def _slice_take_blocks_ax0(
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
blocks = []
+ refs: list[weakref.ref | None] = []
group = not only_slice
for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):
if blkno == -1:
@@ -768,6 +861,7 @@ def _slice_take_blocks_ax0(
use_na_proxy=use_na_proxy,
)
)
+ refs.append(None)
else:
blk = self.blocks[blkno]
@@ -781,18 +875,20 @@ def _slice_take_blocks_ax0(
newblk = blk.copy(deep=False)
newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1))
blocks.append(newblk)
+ refs.append(weakref.ref(blk))
else:
# GH#32779 to avoid the performance penalty of copying,
# we may try to only slice
taker = blklocs[mgr_locs.indexer]
max_len = max(len(mgr_locs), taker.max() + 1)
- if only_slice:
+ if only_slice or _using_copy_on_write():
taker = lib.maybe_indices_to_slice(taker, max_len)
if isinstance(taker, slice):
nb = blk.getitem_block_columns(taker, new_mgr_locs=mgr_locs)
blocks.append(nb)
+ refs.append(weakref.ref(blk))
elif only_slice:
# GH#33597 slice instead of take, so we get
# views instead of copies
@@ -802,11 +898,13 @@ def _slice_take_blocks_ax0(
nb = blk.getitem_block_columns(slc, new_mgr_locs=bp)
# We have np.shares_memory(nb.values, blk.values)
blocks.append(nb)
+ refs.append(weakref.ref(blk))
else:
nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs)
blocks.append(nb)
+ refs.append(None)
- return blocks
+ return blocks, refs
def _make_na_block(
self, placement: BlockPlacement, fill_value=None, use_na_proxy: bool = False
@@ -873,6 +971,7 @@ def take(
indexer=indexer,
axis=axis,
allow_dups=True,
+ copy=None,
)
@@ -890,6 +989,7 @@ def __init__(
self,
blocks: Sequence[Block],
axes: Sequence[Index],
+ refs: list[weakref.ref | None] | None = None,
verify_integrity: bool = True,
) -> None:
@@ -939,13 +1039,26 @@ def _verify_integrity(self) -> None:
f"block items\n# manager items: {len(self.items)}, # "
f"tot_items: {tot_items}"
)
+ if self.refs is not None:
+ if len(self.refs) != len(self.blocks):
+ raise AssertionError(
+ "Number of passed refs must equal the number of blocks: "
+ f"{len(self.refs)} refs vs {len(self.blocks)} blocks."
+ "\nIf you see this error, please report a bug at "
+ "https://github.com/pandas-dev/pandas/issues"
+ )
@classmethod
- def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> BlockManager:
+ def from_blocks(
+ cls,
+ blocks: list[Block],
+ axes: list[Index],
+ refs: list[weakref.ref | None] | None = None,
+ ) -> BlockManager:
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
- return cls(blocks, axes, verify_integrity=False)
+ return cls(blocks, axes, refs, verify_integrity=False)
# ----------------------------------------------------------------
# Indexing
@@ -965,7 +1078,9 @@ def fast_xs(self, loc: int) -> SingleBlockManager:
if len(self.blocks) == 1:
result = self.blocks[0].iget((slice(None), loc))
block = new_block(result, placement=slice(0, len(result)), ndim=1)
- return SingleBlockManager(block, self.axes[0])
+ # in the case of a single block, the new block is a view
+ ref = weakref.ref(self.blocks[0])
+ return SingleBlockManager(block, self.axes[0], [ref])
dtype = interleaved_dtype([blk.dtype for blk in self.blocks])
@@ -996,12 +1111,16 @@ def iget(self, i: int) -> SingleBlockManager:
# shortcut for select a single-dim from a 2-dim BM
bp = BlockPlacement(slice(0, len(values)))
nb = type(block)(values, placement=bp, ndim=1)
- return SingleBlockManager(nb, self.axes[1])
+ return SingleBlockManager(nb, self.axes[1], [weakref.ref(block)])
def iget_values(self, i: int) -> ArrayLike:
"""
Return the data for column i as the values (ndarray or ExtensionArray).
+
+ Warning! The returned array is a view but doesn't handle Copy-on-Write,
+ so this should be used with caution.
"""
+ # TODO(CoW) making the arrays read-only might make this safer to use?
block = self.blocks[self.blknos[i]]
values = block.iget(self.blklocs[i])
return values
@@ -1011,6 +1130,9 @@ def column_arrays(self) -> list[np.ndarray]:
"""
Used in the JSON C code to access column arrays.
This optimizes compared to using `iget_values` by converting each
+
+ Warning! This doesn't handle Copy-on-Write, so should be used with
+ caution (current use case of consuming this in the JSON code is fine).
"""
# This is an optimized equivalent to
# result = [self.iget_values(i) for i in range(len(self.items))]
@@ -1102,7 +1224,12 @@ def value_getitem(placement):
blk = self.blocks[blkno_l]
blk_locs = blklocs[val_locs.indexer]
if inplace and blk.should_store(value):
- blk.set_inplace(blk_locs, value_getitem(val_locs))
+ # Updating inplace -> check if we need to do Copy-on-Write
+ if _using_copy_on_write() and not self._has_no_reference_block(blkno_l):
+ blk.set_inplace(blk_locs, value_getitem(val_locs), copy=True)
+ self._clear_reference_block(blkno_l)
+ else:
+ blk.set_inplace(blk_locs, value_getitem(val_locs))
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
@@ -1117,9 +1244,11 @@ def value_getitem(placement):
)
self.blocks = blocks_tup
self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb))
+ # blk.delete gives a copy, so we can remove a possible reference
+ self._clear_reference_block(blkno_l)
if len(removed_blknos):
- # Remove blocks & update blknos accordingly
+ # Remove blocks & update blknos and refs accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
@@ -1130,6 +1259,12 @@ def value_getitem(placement):
self.blocks = tuple(
blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
)
+ if self.refs is not None:
+ self.refs = [
+ ref
+ for i, ref in enumerate(self.refs)
+ if i not in set(removed_blknos)
+ ]
if unfit_val_locs:
unfit_idxr = np.concatenate(unfit_mgr_locs)
@@ -1166,6 +1301,10 @@ def value_getitem(placement):
self._blklocs[unfit_idxr] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
+ # TODO(CoW) is this always correct to assume that the new_blocks
+ # are not referencing anything else?
+ if self.refs is not None:
+ self.refs = list(self.refs) + [None] * len(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
@@ -1183,14 +1322,20 @@ def _iset_single(
# Caller is responsible for verifying value.shape
if inplace and blk.should_store(value):
+ copy = False
+ if _using_copy_on_write() and not self._has_no_reference_block(blkno):
+ # perform Copy-on-Write and clear the reference
+ copy = True
+ self._clear_reference_block(blkno)
iloc = self.blklocs[loc]
- blk.set_inplace(slice(iloc, iloc + 1), value)
+ blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy)
return
nb = new_block_2d(value, placement=blk._mgr_locs)
old_blocks = self.blocks
new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :]
self.blocks = new_blocks
+ self._clear_reference_block(blkno)
return
def column_setitem(self, loc: int, idx: int | slice | np.ndarray, value) -> None:
@@ -1200,6 +1345,14 @@ def column_setitem(self, loc: int, idx: int | slice | np.ndarray, value) -> None
This is a method on the BlockManager level, to avoid creating an
intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`)
"""
+ if _using_copy_on_write() and not self._has_no_reference(loc):
+ # otherwise perform Copy-on-Write and clear the reference
+ blkno = self.blknos[loc]
+ blocks = list(self.blocks)
+ blocks[blkno] = blocks[blkno].copy()
+ self.blocks = tuple(blocks)
+ self._clear_reference_block(blkno)
+
col_mgr = self.iget(loc)
new_mgr = col_mgr.setitem((idx,), value)
self.iset(loc, new_mgr._block.values, inplace=True)
@@ -1239,6 +1392,9 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None:
self.axes[0] = new_axis
self.blocks += (block,)
+ # TODO(CoW) do we always "own" the passed `value`?
+ if self.refs is not None:
+ self.refs += [None]
self._known_consolidated = False
@@ -1292,10 +1448,10 @@ def idelete(self, indexer) -> BlockManager:
is_deleted[indexer] = True
taker = (~is_deleted).nonzero()[0]
- nbs = self._slice_take_blocks_ax0(taker, only_slice=True)
+ nbs, new_refs = self._slice_take_blocks_ax0(taker, only_slice=True)
new_columns = self.items[~is_deleted]
axes = [new_columns, self.axes[1]]
- return type(self)(tuple(nbs), axes, verify_integrity=False)
+ return type(self)(tuple(nbs), axes, new_refs, verify_integrity=False)
# ----------------------------------------------------------------
# Block-wise Operation
@@ -1550,6 +1706,7 @@ def as_array(
-------
arr : ndarray
"""
+ # TODO(CoW) handle case where resulting array is a view
if len(self.blocks) == 0:
arr = np.empty(self.shape, dtype=float)
return arr.transpose()
@@ -1674,7 +1831,10 @@ def _consolidate_inplace(self) -> None:
# the DataFrame's _item_cache. The exception is for newly-created
# BlockManager objects not yet attached to a DataFrame.
if not self.is_consolidated():
- self.blocks = tuple(_consolidate(self.blocks))
+ if self.refs is None:
+ self.blocks = _consolidate(self.blocks)
+ else:
+ self.blocks, self.refs = _consolidate_with_refs(self.blocks, self.refs)
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
@@ -1696,6 +1856,7 @@ def __init__(
self,
block: Block,
axis: Index,
+ refs: list[weakref.ref | None] | None = None,
verify_integrity: bool = False,
fastpath=lib.no_default,
) -> None:
@@ -1713,15 +1874,23 @@ def __init__(
self.axes = [axis]
self.blocks = (block,)
+ self.refs = refs
@classmethod
- def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> SingleBlockManager:
+ def from_blocks(
+ cls,
+ blocks: list[Block],
+ axes: list[Index],
+ refs: list[weakref.ref | None] | None = None,
+ ) -> SingleBlockManager:
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
assert len(blocks) == 1
assert len(axes) == 1
- return cls(blocks[0], axes[0], verify_integrity=False)
+ if refs is not None:
+ assert len(refs) == 1
+ return cls(blocks[0], axes[0], refs, verify_integrity=False)
@classmethod
def from_array(cls, array: ArrayLike, index: Index) -> SingleBlockManager:
@@ -1740,7 +1909,18 @@ def to_2d_mgr(self, columns: Index) -> BlockManager:
bp = BlockPlacement(0)
new_blk = type(blk)(arr, placement=bp, ndim=2)
axes = [columns, self.axes[0]]
- return BlockManager([new_blk], axes=axes, verify_integrity=False)
+ refs: list[weakref.ref | None] = [weakref.ref(blk)]
+ return BlockManager([new_blk], axes=axes, refs=refs, verify_integrity=False)
+
+ def _has_no_reference(self, i: int = 0) -> bool:
+ """
+ Check for column `i` if it has references.
+ (whether it references another array or is itself being referenced)
+ Returns True if the column has no references.
+ """
+ return (self.refs is None or self.refs[0] is None) and weakref.getweakrefcount(
+ self.blocks[0]
+ ) == 0
def __getstate__(self):
block_values = [b.values for b in self.blocks]
@@ -1810,7 +1990,9 @@ def getitem_mgr(self, indexer: slice | npt.NDArray[np.bool_]) -> SingleBlockMana
block = type(blk)(array, placement=bp, ndim=1)
new_idx = self.index[indexer]
- return type(self)(block, new_idx)
+ # TODO(CoW) in theory only need to track reference if new_array is a view
+ ref = weakref.ref(blk)
+ return type(self)(block, new_idx, [ref])
def get_slice(self, slobj: slice, axis: int = 0) -> SingleBlockManager:
# Assertion disabled for performance
@@ -1823,7 +2005,7 @@ def get_slice(self, slobj: slice, axis: int = 0) -> SingleBlockManager:
bp = BlockPlacement(slice(0, len(array)))
block = type(blk)(array, placement=bp, ndim=1)
new_index = self.index._getitem_slice(slobj)
- return type(self)(block, new_index)
+ return type(self)(block, new_index, [weakref.ref(blk)])
@property
def index(self) -> Index:
@@ -1850,15 +2032,30 @@ def array_values(self):
def get_numeric_data(self, copy: bool = False):
if self._block.is_numeric:
- if copy:
- return self.copy()
- return self
+ return self.copy(deep=copy)
return self.make_empty()
@property
def _can_hold_na(self) -> bool:
return self._block._can_hold_na
+ def setitem_inplace(self, indexer, value) -> None:
+ """
+ Set values with indexer.
+
+ For Single[Block/Array]Manager, this backs s[indexer] = value
+
+ This is an inplace version of `setitem()`, mutating the manager/values
+ in place, not returning a new Manager (and Block), and thus never changing
+ the dtype.
+ """
+ if _using_copy_on_write() and not self._has_no_reference(0):
+ self.blocks = (self._block.copy(),)
+ self.refs = None
+ self._cache.clear()
+
+ super().setitem_inplace(indexer, value)
+
def idelete(self, indexer) -> SingleBlockManager:
"""
Delete single location from SingleBlockManager.
@@ -1869,6 +2066,8 @@ def idelete(self, indexer) -> SingleBlockManager:
self.blocks = (nb,)
self.axes[0] = self.axes[0].delete(indexer)
self._cache.clear()
+ # clear reference since delete always results in a new array
+ self.refs = None
return self
def fast_xs(self, loc):
@@ -1885,6 +2084,9 @@ def set_values(self, values: ArrayLike):
Use at your own risk! This does not check if the passed values are
valid for the current Block/SingleBlockManager (length, dtype, etc).
"""
+ # TODO(CoW) do we need to handle copy on write here? Currently this is
+ # only used for FrameColumnApply.series_generator (what if apply is
+ # mutating inplace?)
self.blocks[0].values = values
self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values)))
@@ -2068,7 +2270,7 @@ def _stack_arrays(tuples, dtype: np.dtype):
return stacked, placement
-def _consolidate(blocks: tuple[Block, ...]) -> list[Block]:
+def _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]:
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
@@ -2078,19 +2280,44 @@ def _consolidate(blocks: tuple[Block, ...]) -> list[Block]:
new_blocks: list[Block] = []
for (_can_consolidate, dtype), group_blocks in grouper:
- merged_blocks = _merge_blocks(
+ merged_blocks, _ = _merge_blocks(
+ list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate
+ )
+ new_blocks = extend_blocks(merged_blocks, new_blocks)
+ return tuple(new_blocks)
+
+
+def _consolidate_with_refs(
+ blocks: tuple[Block, ...], refs
+) -> tuple[tuple[Block, ...], list[weakref.ref | None]]:
+ """
+ Merge blocks having same dtype, exclude non-consolidating blocks, handling
+ refs
+ """
+ gkey = lambda x: x[0]._consolidate_key
+ grouper = itertools.groupby(sorted(zip(blocks, refs), key=gkey), gkey)
+
+ new_blocks: list[Block] = []
+ new_refs: list[weakref.ref | None] = []
+ for (_can_consolidate, dtype), group_blocks_refs in grouper:
+ group_blocks, group_refs = list(zip(*list(group_blocks_refs)))
+ merged_blocks, consolidated = _merge_blocks(
list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate
)
new_blocks = extend_blocks(merged_blocks, new_blocks)
- return new_blocks
+ if consolidated:
+ new_refs.extend([None])
+ else:
+ new_refs.extend(group_refs)
+ return tuple(new_blocks), new_refs
def _merge_blocks(
blocks: list[Block], dtype: DtypeObj, can_consolidate: bool
-) -> list[Block]:
+) -> tuple[list[Block], bool]:
if len(blocks) == 1:
- return blocks
+ return blocks, False
if can_consolidate:
@@ -2116,10 +2343,10 @@ def _merge_blocks(
new_mgr_locs = new_mgr_locs[argsort]
bp = BlockPlacement(new_mgr_locs)
- return [new_block_2d(new_values, placement=bp)]
+ return [new_block_2d(new_values, placement=bp)], True
# can't consolidate --> no merge
- return blocks
+ return blocks, False
def _fast_count_smallints(arr: npt.NDArray[np.intp]):
@@ -2152,3 +2379,7 @@ def _preprocess_slice_or_indexer(
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return "fancy", indexer, len(indexer)
+
+
+def _using_copy_on_write():
+ return get_option("mode.copy_on_write")
diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py
index 1160d3b2a8e3a..5febb302a9de9 100644
--- a/pandas/core/internals/ops.py
+++ b/pandas/core/internals/ops.py
@@ -36,7 +36,7 @@ def _iter_block_pairs(
left_ea = blk_vals.ndim == 1
- rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)
+ rblks, _ = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)
# Assertions are disabled for performance, but should hold:
# if left_ea:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f55d6a26255a0..f5e44c732aff8 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1301,7 +1301,16 @@ def _maybe_update_cacher(
# a copy
if ref is None:
del self._cacher
- elif len(self) == len(ref) and self.name in ref.columns:
+ # for CoW, we never want to update the parent DataFrame cache
+ # if the Series changed, and always pop the cached item
+ elif (
+ not (
+ get_option("mode.copy_on_write")
+ and get_option("mode.data_manager") == "block"
+ )
+ and len(self) == len(ref)
+ and self.name in ref.columns
+ ):
# GH#42530 self.name must be in ref.columns
# to ensure column still in dataframe
# otherwise, either self or ref has swapped in new arrays
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 22b8f9d020a5f..a0b9a0d247533 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -341,8 +341,10 @@ def convert_delta_safe(base, deltas, unit) -> Series:
has_bad_values = False
if bad_locs.any():
has_bad_values = True
- data_col = Series(dates)
- data_col[bad_locs] = 1.0 # Replace with NaT
+ # reset cache to avoid SettingWithCopy checks (we own the DataFrame and the
+    # `dates` Series is used to overwrite itself in the DataFrame)
+ dates._reset_cacher()
+ dates[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt.startswith(("%tc", "tc")): # Delta ms relative to base
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index ff3abaf819206..d2882f46d25bf 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -1440,7 +1440,7 @@ def test_apply_dtype(col):
tm.assert_series_equal(result, expected)
-def test_apply_mutating(using_array_manager):
+def test_apply_mutating(using_array_manager, using_copy_on_write):
# GH#35462 case where applied func pins a new BlockManager to a row
df = DataFrame({"a": range(100), "b": range(100, 200)})
df_orig = df.copy()
@@ -1457,12 +1457,13 @@ def func(row):
result = df.apply(func, axis=1)
tm.assert_frame_equal(result, expected)
- if not using_array_manager:
+ if using_copy_on_write or using_array_manager:
+ # INFO(CoW) With copy on write, mutating a viewing row doesn't mutate the parent
# INFO(ArrayManager) With BlockManager, the row is a view and mutated in place,
# with ArrayManager the row is not a view, and thus not mutated in place
- tm.assert_frame_equal(df, result)
- else:
tm.assert_frame_equal(df, df_orig)
+ else:
+ tm.assert_frame_equal(df, result)
def test_apply_empty_list_reduce():
diff --git a/pandas/tests/copy_view/test_indexing.py b/pandas/tests/copy_view/test_indexing.py
index 6eb7237c4f41c..d917a3c79aa97 100644
--- a/pandas/tests/copy_view/test_indexing.py
+++ b/pandas/tests/copy_view/test_indexing.py
@@ -150,7 +150,7 @@ def test_subset_column_slice(using_copy_on_write, using_array_manager, dtype):
ids=["slice", "mask", "array"],
)
def test_subset_loc_rows_columns(
- dtype, row_indexer, column_indexer, using_array_manager
+ dtype, row_indexer, column_indexer, using_array_manager, using_copy_on_write
):
# Case: taking a subset of the rows+columns of a DataFrame using .loc
# + afterwards modifying the subset
@@ -177,7 +177,7 @@ def test_subset_loc_rows_columns(
if (
isinstance(row_indexer, slice)
and isinstance(column_indexer, slice)
- and (using_array_manager or dtype == "int64")
+ and (using_array_manager or (dtype == "int64" and not using_copy_on_write))
):
df_orig.iloc[1, 1] = 0
tm.assert_frame_equal(df, df_orig)
@@ -197,7 +197,7 @@ def test_subset_loc_rows_columns(
ids=["slice", "mask", "array"],
)
def test_subset_iloc_rows_columns(
- dtype, row_indexer, column_indexer, using_array_manager
+ dtype, row_indexer, column_indexer, using_array_manager, using_copy_on_write
):
# Case: taking a subset of the rows+columns of a DataFrame using .iloc
# + afterwards modifying the subset
@@ -224,7 +224,7 @@ def test_subset_iloc_rows_columns(
if (
isinstance(row_indexer, slice)
and isinstance(column_indexer, slice)
- and (using_array_manager or dtype == "int64")
+ and (using_array_manager or (dtype == "int64" and not using_copy_on_write))
):
df_orig.iloc[1, 1] = 0
tm.assert_frame_equal(df, df_orig)
diff --git a/pandas/tests/copy_view/test_internals.py b/pandas/tests/copy_view/test_internals.py
new file mode 100644
index 0000000000000..2191fc1b33218
--- /dev/null
+++ b/pandas/tests/copy_view/test_internals.py
@@ -0,0 +1,45 @@
+import numpy as np
+
+import pandas.util._test_decorators as td
+
+from pandas import DataFrame
+from pandas.tests.copy_view.util import get_array
+
+
+@td.skip_array_manager_invalid_test
+def test_consolidate(using_copy_on_write):
+
+ # create unconsolidated DataFrame
+ df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
+ df["c"] = [4, 5, 6]
+
+ # take a viewing subset
+ subset = df[:]
+
+ # each block of subset references a block of df
+ assert subset._mgr.refs is not None and all(
+ ref is not None for ref in subset._mgr.refs
+ )
+
+ # consolidate the two int64 blocks
+ subset._consolidate_inplace()
+
+    # the float64 block still references the parent one because it is still a view
+ assert subset._mgr.refs[0] is not None
+ # equivalent of assert np.shares_memory(df["b"].values, subset["b"].values)
+ # but avoids caching df["b"]
+ assert np.shares_memory(get_array(df, "b"), get_array(subset, "b"))
+
+ # the new consolidated int64 block does not reference another
+ assert subset._mgr.refs[1] is None
+
+ # the parent dataframe now also only is linked for the float column
+ assert df._mgr._has_no_reference(0)
+ assert not df._mgr._has_no_reference(1)
+ assert df._mgr._has_no_reference(2)
+
+ # and modifying subset still doesn't modify parent
+ if using_copy_on_write:
+ subset.iloc[0, 1] = 0.0
+ assert df._mgr._has_no_reference(1)
+ assert df.loc[0, "b"] == 0.1
diff --git a/pandas/tests/copy_view/test_methods.py b/pandas/tests/copy_view/test_methods.py
index 1ed458e95b78e..cc4c219e6c5d9 100644
--- a/pandas/tests/copy_view/test_methods.py
+++ b/pandas/tests/copy_view/test_methods.py
@@ -1,6 +1,9 @@
import numpy as np
-from pandas import DataFrame
+from pandas import (
+ DataFrame,
+ Series,
+)
import pandas._testing as tm
from pandas.tests.copy_view.util import get_array
@@ -126,3 +129,47 @@ def test_reindex_columns(using_copy_on_write):
if using_copy_on_write:
assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
tm.assert_frame_equal(df, df_orig)
+
+
+def test_select_dtypes(using_copy_on_write):
+ # Case: selecting columns using `select_dtypes()` returns a new dataframe
+ # + afterwards modifying the result
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+ df_orig = df.copy()
+ df2 = df.select_dtypes("int64")
+ df2._mgr._verify_integrity()
+
+ # currently this always returns a "view"
+ assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+
+ # mutating df2 triggers a copy-on-write for that column/block
+ df2.iloc[0, 0] = 0
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ # but currently select_dtypes() actually returns a view -> mutates parent
+ df_orig.iloc[0, 0] = 0
+ tm.assert_frame_equal(df, df_orig)
+
+
+def test_to_frame(using_copy_on_write):
+ # Case: converting a Series to a DataFrame with to_frame
+ ser = Series([1, 2, 3])
+ ser_orig = ser.copy()
+
+ df = ser.to_frame()
+
+ # currently this always returns a "view"
+ assert np.shares_memory(ser.values, get_array(df, 0))
+
+ df.iloc[0, 0] = 0
+
+ if using_copy_on_write:
+ # mutating df triggers a copy-on-write for that column
+ assert not np.shares_memory(ser.values, get_array(df, 0))
+ tm.assert_series_equal(ser, ser_orig)
+ else:
+        # but currently to_frame() actually returns a view -> mutates parent
+ ser_orig.iloc[0] = 0
+ tm.assert_series_equal(ser, ser_orig)
diff --git a/pandas/tests/copy_view/test_setitem.py b/pandas/tests/copy_view/test_setitem.py
index dd42983179806..9e0d350dde0de 100644
--- a/pandas/tests/copy_view/test_setitem.py
+++ b/pandas/tests/copy_view/test_setitem.py
@@ -34,8 +34,9 @@ def test_set_column_with_series(using_copy_on_write):
df["c"] = ser
if using_copy_on_write:
- # with CoW we can delay the copy
- assert np.shares_memory(df["c"].values, ser.values)
+ # TODO(CoW) with CoW we can delay the copy
+ # assert np.shares_memory(df["c"].values, ser.values)
+ assert not np.shares_memory(df["c"].values, ser.values)
else:
# the series data is copied
assert not np.shares_memory(df["c"].values, ser.values)
@@ -78,8 +79,9 @@ def test_set_columns_with_dataframe(using_copy_on_write):
df[["c", "d"]] = df2
if using_copy_on_write:
- # with CoW we can delay the copy
- assert np.shares_memory(df["c"].values, df2["c"].values)
+ # TODO(CoW) with CoW we can delay the copy
+ # assert np.shares_memory(df["c"].values, df2["c"].values)
+ assert not np.shares_memory(df["c"].values, df2["c"].values)
else:
# the data is copied
assert not np.shares_memory(df["c"].values, df2["c"].values)
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 04fa3c11a6c40..775d9c4cbcc45 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -398,6 +398,7 @@ def test_setitem_frame_2d_values(self, data):
# Avoiding using_array_manager fixture
# https://github.com/pandas-dev/pandas/pull/44514#discussion_r754002410
using_array_manager = isinstance(df._mgr, pd.core.internals.ArrayManager)
+ using_copy_on_write = pd.options.mode.copy_on_write
blk_data = df._mgr.arrays[0]
@@ -422,7 +423,7 @@ def test_setitem_frame_2d_values(self, data):
with tm.assert_produces_warning(warn, match=msg):
df.iloc[:] = df.values
self.assert_frame_equal(df, orig)
- if not using_array_manager:
+ if not using_array_manager and not using_copy_on_write:
# GH#33457 Check that this setting occurred in-place
# FIXME(ArrayManager): this should work there too
assert df._mgr.arrays[0] is blk_data
diff --git a/pandas/tests/frame/indexing/test_getitem.py b/pandas/tests/frame/indexing/test_getitem.py
index 7994c56f8d68b..a98fa52e1009d 100644
--- a/pandas/tests/frame/indexing/test_getitem.py
+++ b/pandas/tests/frame/indexing/test_getitem.py
@@ -357,12 +357,18 @@ def test_getitem_empty_frame_with_boolean(self):
df2 = df[df > 0]
tm.assert_frame_equal(df, df2)
- def test_getitem_returns_view_when_column_is_unique_in_df(self):
+ def test_getitem_returns_view_when_column_is_unique_in_df(
+ self, using_copy_on_write
+ ):
# GH#45316
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
+ df_orig = df.copy()
view = df["b"]
view.loc[:] = 100
- expected = DataFrame([[1, 2, 100], [4, 5, 100]], columns=["a", "a", "b"])
+ if using_copy_on_write:
+ expected = df_orig
+ else:
+ expected = DataFrame([[1, 2, 100], [4, 5, 100]], columns=["a", "a", "b"])
tm.assert_frame_equal(df, expected)
def test_getitem_frozenset_unique_in_column(self):
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 9027ce8109810..6eecf4c18f182 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -269,7 +269,7 @@ def test_setattr_column(self):
df.foobar = 5
assert (df.foobar == 5).all()
- def test_setitem(self, float_frame):
+ def test_setitem(self, float_frame, using_copy_on_write):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
@@ -305,8 +305,12 @@ def test_setitem(self, float_frame):
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # With CoW, adding a new column doesn't raise a warning
smaller["col10"] = ["1", "2"]
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
@@ -536,22 +540,29 @@ def test_getitem_setitem_integer_slice_keyerrors(self):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
- def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
+ def test_fancy_getitem_slice_mixed(
+ self, float_frame, float_string_frame, using_copy_on_write
+ ):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
+ original = float_frame.copy()
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
- sliced.loc[:, "C"] = 4.0
+ if not using_copy_on_write:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ sliced.loc[:, "C"] = 4.0
- assert (float_frame["C"] == 4).all()
+ assert (float_frame["C"] == 4).all()
+ else:
+ sliced.loc[:, "C"] = 4.0
+ tm.assert_frame_equal(float_frame, original)
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
@@ -994,7 +1005,7 @@ def test_iloc_row(self):
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
- def test_iloc_row_slice_view(self, using_array_manager):
+ def test_iloc_row_slice_view(self, using_array_manager, using_copy_on_write):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
original = df.copy()
@@ -1004,14 +1015,17 @@ def test_iloc_row_slice_view(self, using_array_manager):
assert np.shares_memory(df[2], subset[2])
+ exp_col = original[2].copy()
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
subset.loc[:, 2] = 0.0
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ subset.loc[:, 2] = 0.0
- exp_col = original[2].copy()
- # TODO(ArrayManager) verify it is expected that the original didn't change
- if not using_array_manager:
- exp_col._values[4:8] = 0.0
+ # TODO(ArrayManager) verify it is expected that the original didn't change
+ if not using_array_manager:
+ exp_col._values[4:8] = 0.0
tm.assert_series_equal(df[2], exp_col)
def test_iloc_col(self):
@@ -1036,14 +1050,13 @@ def test_iloc_col(self):
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
- def test_iloc_col_slice_view(self, using_array_manager):
+ def test_iloc_col_slice_view(self, using_array_manager, using_copy_on_write):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
original = df.copy()
subset = df.iloc[:, slice(4, 8)]
- if not using_array_manager:
+ if not using_array_manager and not using_copy_on_write:
# verify slice is view
-
assert np.shares_memory(df[8]._values, subset[8]._values)
# and that we are setting a copy
@@ -1053,7 +1066,9 @@ def test_iloc_col_slice_view(self, using_array_manager):
assert (df[8] == 0).all()
else:
- # TODO(ArrayManager) verify this is the desired behaviour
+ if using_copy_on_write:
+ # verify slice is view
+ assert np.shares_memory(df[8]._values, subset[8]._values)
subset[8] = 0.0
# subset changed
assert (subset[8] == 0).all()
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 6b19738becc8e..6d6ae9d646d4e 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -801,7 +801,7 @@ def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
class TestDataFrameSetItemWithExpansion:
- def test_setitem_listlike_views(self):
+ def test_setitem_listlike_views(self, using_copy_on_write):
# GH#38148
df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
@@ -814,7 +814,10 @@ def test_setitem_listlike_views(self):
# edit in place the first column to check view semantics
df.iloc[0, 0] = 100
- expected = Series([100, 2, 3], name="a")
+ if using_copy_on_write:
+ expected = Series([1, 2, 3], name="a")
+ else:
+ expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
def test_setitem_string_column_numpy_dtype_raising(self):
@@ -824,7 +827,7 @@ def test_setitem_string_column_numpy_dtype_raising(self):
expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
tm.assert_frame_equal(df, expected)
- def test_setitem_empty_df_duplicate_columns(self):
+ def test_setitem_empty_df_duplicate_columns(self, using_copy_on_write):
# GH#38521
df = DataFrame(columns=["a", "b", "b"], dtype="float64")
df.loc[:, "a"] = list(range(2))
@@ -1134,7 +1137,9 @@ def test_setitem_always_copy(self, float_frame):
assert notna(s[5:10]).all()
@pytest.mark.parametrize("consolidate", [True, False])
- def test_setitem_partial_column_inplace(self, consolidate, using_array_manager):
+ def test_setitem_partial_column_inplace(
+ self, consolidate, using_array_manager, using_copy_on_write
+ ):
# This setting should be in-place, regardless of whether frame is
# single-block or multi-block
# GH#304 this used to be incorrectly not-inplace, in which case
@@ -1159,8 +1164,9 @@ def test_setitem_partial_column_inplace(self, consolidate, using_array_manager):
tm.assert_series_equal(df["z"], expected)
# check setting occurred in-place
- tm.assert_numpy_array_equal(zvals, expected.values)
- assert np.shares_memory(zvals, df["z"]._values)
+ if not using_copy_on_write:
+ tm.assert_numpy_array_equal(zvals, expected.values)
+ assert np.shares_memory(zvals, df["z"]._values)
def test_setitem_duplicate_columns_not_inplace(self):
# GH#39510
diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py
index 898722d6d77ae..5951c1dd6e45e 100644
--- a/pandas/tests/frame/indexing/test_xs.py
+++ b/pandas/tests/frame/indexing/test_xs.py
@@ -111,13 +111,17 @@ def test_xs_keep_level(self):
result = df.xs([2008, "sat"], level=["year", "day"], drop_level=False)
tm.assert_frame_equal(result, expected)
- def test_xs_view(self, using_array_manager):
+ def test_xs_view(self, using_array_manager, using_copy_on_write):
# in 0.14 this will return a view if possible a copy otherwise, but
# this is numpy dependent
dm = DataFrame(np.arange(20.0).reshape(4, 5), index=range(4), columns=range(5))
+ df_orig = dm.copy()
- if using_array_manager:
+ if using_copy_on_write:
+ dm.xs(2)[:] = 20
+ tm.assert_frame_equal(dm, df_orig)
+ elif using_array_manager:
# INFO(ArrayManager) with ArrayManager getting a row as a view is
# not possible
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
@@ -176,27 +180,41 @@ def test_xs_level_eq_2(self):
result = df.xs("c", level=2)
tm.assert_frame_equal(result, expected)
- def test_xs_setting_with_copy_error(self, multiindex_dataframe_random_data):
+ def test_xs_setting_with_copy_error(
+ self, multiindex_dataframe_random_data, using_copy_on_write
+ ):
# this is a copy in 0.14
df = multiindex_dataframe_random_data
+ df_orig = df.copy()
result = df.xs("two", level="second")
- # setting this will give a SettingWithCopyError
- # as we are trying to write a view
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
result[:] = 10
+ else:
+ # setting this will give a SettingWithCopyError
+ # as we are trying to write a view
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(SettingWithCopyError, match=msg):
+ result[:] = 10
+ tm.assert_frame_equal(df, df_orig)
- def test_xs_setting_with_copy_error_multiple(self, four_level_index_dataframe):
+ def test_xs_setting_with_copy_error_multiple(
+ self, four_level_index_dataframe, using_copy_on_write
+ ):
# this is a copy in 0.14
df = four_level_index_dataframe
+ df_orig = df.copy()
result = df.xs(("a", 4), level=["one", "four"])
- # setting this will give a SettingWithCopyError
- # as we are trying to write a view
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
result[:] = 10
+ else:
+ # setting this will give a SettingWithCopyError
+ # as we are trying to write a view
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(SettingWithCopyError, match=msg):
+ result[:] = 10
+ tm.assert_frame_equal(df, df_orig)
@pytest.mark.parametrize("key, level", [("one", "second"), (["one"], ["second"])])
def test_xs_with_duplicates(self, key, level, multiindex_dataframe_random_data):
@@ -359,15 +377,20 @@ def test_xs_droplevel_false(self):
expected = DataFrame({"a": [1]})
tm.assert_frame_equal(result, expected)
- def test_xs_droplevel_false_view(self, using_array_manager):
+ def test_xs_droplevel_false_view(self, using_array_manager, using_copy_on_write):
# GH#37832
df = DataFrame([[1, 2, 3]], columns=Index(["a", "b", "c"]))
result = df.xs("a", axis=1, drop_level=False)
# check that result still views the same data as df
assert np.shares_memory(result.iloc[:, 0]._values, df.iloc[:, 0]._values)
- # modifying original df also modifies result when having a single block
+
df.iloc[0, 0] = 2
- expected = DataFrame({"a": [2]})
+ if using_copy_on_write:
+ # with copy on write the subset is never modified
+ expected = DataFrame({"a": [1]})
+ else:
+ # modifying original df also modifies result when having a single block
+ expected = DataFrame({"a": [2]})
tm.assert_frame_equal(result, expected)
# with mixed dataframe, modifying the parent doesn't modify result
@@ -375,7 +398,10 @@ def test_xs_droplevel_false_view(self, using_array_manager):
df = DataFrame([[1, 2.5, "a"]], columns=Index(["a", "b", "c"]))
result = df.xs("a", axis=1, drop_level=False)
df.iloc[0, 0] = 2
- if using_array_manager:
+ if using_copy_on_write:
+ # with copy on write the subset is never modified
+ expected = DataFrame({"a": [1]})
+ elif using_array_manager:
# Here the behavior is consistent
expected = DataFrame({"a": [2]})
else:
diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py
index d86c1b2aedcac..20e59ed72666a 100644
--- a/pandas/tests/frame/methods/test_fillna.py
+++ b/pandas/tests/frame/methods/test_fillna.py
@@ -20,13 +20,16 @@
class TestFillNA:
@td.skip_array_manager_not_yet_implemented
- def test_fillna_on_column_view(self):
+ def test_fillna_on_column_view(self, using_copy_on_write):
# GH#46149 avoid unnecessary copies
arr = np.full((40, 50), np.nan)
df = DataFrame(arr)
df[0].fillna(-1, inplace=True)
- assert (arr[:, 0] == -1).all()
+ if using_copy_on_write:
+ assert np.isnan(arr[:, 0]).all()
+ else:
+ assert (arr[:, 0] == -1).all()
# i.e. we didn't create a new 49-column block
assert len(df._mgr.arrays) == 1
@@ -676,14 +679,18 @@ def test_fillna_inplace_with_columns_limit_and_value(self):
@td.skip_array_manager_invalid_test
@pytest.mark.parametrize("val", [-1, {"x": -1, "y": -1}])
- def test_inplace_dict_update_view(self, val):
+ def test_inplace_dict_update_view(self, val, using_copy_on_write):
# GH#47188
df = DataFrame({"x": [np.nan, 2], "y": [np.nan, 2]})
+ df_orig = df.copy()
result_view = df[:]
df.fillna(val, inplace=True)
expected = DataFrame({"x": [-1, 2.0], "y": [-1.0, 2]})
tm.assert_frame_equal(df, expected)
- tm.assert_frame_equal(result_view, expected)
+ if using_copy_on_write:
+ tm.assert_frame_equal(result_view, df_orig)
+ else:
+ tm.assert_frame_equal(result_view, expected)
def test_single_block_df_with_horizontal_axis(self):
# GH 47713
diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py
index 98f9d2670074d..7d6cf43c530a7 100644
--- a/pandas/tests/frame/methods/test_interpolate.py
+++ b/pandas/tests/frame/methods/test_interpolate.py
@@ -303,7 +303,10 @@ def test_interp_raise_on_all_object_dtype(self):
with pytest.raises(TypeError, match=msg):
df.interpolate()
- def test_interp_inplace(self):
+ def test_interp_inplace(self, using_copy_on_write):
+ # TODO(CoW) inplace keyword (it is still mutating the parent)
+ if using_copy_on_write:
+ pytest.skip("CoW: inplace keyword not yet handled")
df = DataFrame({"a": [1.0, 2.0, np.nan, 4.0]})
expected = DataFrame({"a": [1.0, 2.0, 3.0, 4.0]})
result = df.copy()
diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py
index b1594660caec6..f4443953a0d52 100644
--- a/pandas/tests/frame/methods/test_rename.py
+++ b/pandas/tests/frame/methods/test_rename.py
@@ -171,16 +171,21 @@ def test_rename_multiindex(self):
tm.assert_index_equal(renamed.index, new_index)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) setitem copy/view
- def test_rename_nocopy(self, float_frame):
+ def test_rename_nocopy(self, float_frame, using_copy_on_write):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
assert np.shares_memory(renamed["foo"]._values, float_frame["C"]._values)
- with tm.assert_produces_warning(None):
+ # TODO(CoW) this also shouldn't warn in case of CoW, but the heuristic
+ # checking if the array shares memory doesn't work if CoW happened
+ with tm.assert_produces_warning(FutureWarning if using_copy_on_write else None):
# This loc setitem already happens inplace, so no warning
# that this will change in the future
renamed.loc[:, "foo"] = 1.0
- assert (float_frame["C"] == 1.0).all()
+ if using_copy_on_write:
+ assert not (float_frame["C"] == 1.0).all()
+ else:
+ assert (float_frame["C"] == 1.0).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={"C": "foo"})
diff --git a/pandas/tests/frame/methods/test_to_dict_of_blocks.py b/pandas/tests/frame/methods/test_to_dict_of_blocks.py
index c81bed9d93cc4..eb9b78610a112 100644
--- a/pandas/tests/frame/methods/test_to_dict_of_blocks.py
+++ b/pandas/tests/frame/methods/test_to_dict_of_blocks.py
@@ -27,7 +27,7 @@ def test_copy_blocks(self, float_frame):
# make sure we did not change the original DataFrame
assert not _df[column].equals(df[column])
- def test_no_copy_blocks(self, float_frame):
+ def test_no_copy_blocks(self, float_frame, using_copy_on_write):
# GH#9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
@@ -38,8 +38,11 @@ def test_no_copy_blocks(self, float_frame):
if column in _df:
_df.loc[:, column] = _df[column] + 1
- # make sure we did change the original DataFrame
- assert _df[column].equals(df[column])
+ if not using_copy_on_write:
+ # make sure we did change the original DataFrame
+ assert _df[column].equals(df[column])
+ else:
+ assert not _df[column].equals(df[column])
def test_to_dict_of_blocks_item_cache():
diff --git a/pandas/tests/frame/methods/test_update.py b/pandas/tests/frame/methods/test_update.py
index d3257ac09a0ab..a35530100a425 100644
--- a/pandas/tests/frame/methods/test_update.py
+++ b/pandas/tests/frame/methods/test_update.py
@@ -140,22 +140,29 @@ def test_update_datetime_tz(self):
expected = DataFrame([pd.Timestamp("2019", tz="UTC")])
tm.assert_frame_equal(result, expected)
- def test_update_with_different_dtype(self):
+ def test_update_with_different_dtype(self, using_copy_on_write):
# GH#3217
df = DataFrame({"a": [1, 3], "b": [np.nan, 2]})
df["c"] = np.nan
- df["c"].update(Series(["foo"], index=[0]))
+ if using_copy_on_write:
+ df.update({"c": Series(["foo"], index=[0])})
+ else:
+ df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame({"a": [1, 3], "b": [np.nan, 2], "c": ["foo", np.nan]})
tm.assert_frame_equal(df, expected)
@td.skip_array_manager_invalid_test
- def test_update_modify_view(self):
+ def test_update_modify_view(self, using_copy_on_write):
# GH#47188
df = DataFrame({"A": ["1", np.nan], "B": ["100", np.nan]})
df2 = DataFrame({"A": ["a", "x"], "B": ["100", "200"]})
+ df2_orig = df2.copy()
result_view = df2[:]
df2.update(df)
expected = DataFrame({"A": ["1", "x"], "B": ["100", "200"]})
tm.assert_frame_equal(df2, expected)
- tm.assert_frame_equal(result_view, expected)
+ if using_copy_on_write:
+ tm.assert_frame_equal(result_view, df2_orig)
+ else:
+ tm.assert_frame_equal(result_view, expected)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index af092d433a846..cb97e2bfb6202 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -323,7 +323,9 @@ def test_attrs(self):
assert result.attrs == {"version": 1}
@pytest.mark.parametrize("allows_duplicate_labels", [True, False, None])
- def test_set_flags(self, allows_duplicate_labels, frame_or_series):
+ def test_set_flags(
+ self, allows_duplicate_labels, frame_or_series, using_copy_on_write
+ ):
obj = DataFrame({"A": [1, 2]})
key = (0, 0)
if frame_or_series is Series:
@@ -345,15 +347,25 @@ def test_set_flags(self, allows_duplicate_labels, frame_or_series):
assert obj.flags.allows_duplicate_labels is True
# But we didn't copy data
+ if frame_or_series is Series:
+ assert np.may_share_memory(obj.values, result.values)
+ else:
+ assert np.may_share_memory(obj["A"].values, result["A"].values)
+
result.iloc[key] = 0
- assert obj.iloc[key] == 0
+ if using_copy_on_write:
+ assert obj.iloc[key] == 1
+ else:
+ assert obj.iloc[key] == 0
+ # set back to 1 for test below
+ result.iloc[key] = 1
# Now we do copy.
result = obj.set_flags(
copy=True, allows_duplicate_labels=allows_duplicate_labels
)
result.iloc[key] = 10
- assert obj.iloc[key] == 0
+ assert obj.iloc[key] == 1
def test_constructor_expanddim(self):
# GH#33628 accessing _constructor_expanddim should not raise NotImplementedError
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index 8aa0e980b01c4..46c712cf4d458 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -334,7 +334,7 @@ def test_is_mixed_type(self, float_frame, float_string_frame):
assert not float_frame._is_mixed_type
assert float_string_frame._is_mixed_type
- def test_stale_cached_series_bug_473(self):
+ def test_stale_cached_series_bug_473(self, using_copy_on_write):
# this is chained, but ok
with option_context("chained_assignment", None):
@@ -349,9 +349,12 @@ def test_stale_cached_series_bug_473(self):
repr(Y)
result = Y.sum() # noqa
exp = Y["g"].sum() # noqa
- assert pd.isna(Y["g"]["c"])
+ if using_copy_on_write:
+ assert not pd.isna(Y["g"]["c"])
+ else:
+ assert pd.isna(Y["g"]["c"])
- def test_strange_column_corruption_issue(self):
+ def test_strange_column_corruption_issue(self, using_copy_on_write):
# TODO(wesm): Unclear how exactly this is related to internal matters
df = DataFrame(index=[0, 1])
df[0] = np.nan
@@ -363,7 +366,10 @@ def test_strange_column_corruption_issue(self):
if col not in wasCol:
wasCol[col] = 1
df[col] = np.nan
- df[col][dt] = i
+ if using_copy_on_write:
+ df.loc[dt, col] = i
+ else:
+ df[col][dt] = i
myid = 100
@@ -396,7 +402,7 @@ def test_add_column_with_pandas_array(self):
tm.assert_frame_equal(df, df2)
-def test_update_inplace_sets_valid_block_values():
+def test_update_inplace_sets_valid_block_values(using_copy_on_write):
# https://github.com/pandas-dev/pandas/issues/33457
df = DataFrame({"a": Series([1, 2, None], dtype="category")})
@@ -406,8 +412,9 @@ def test_update_inplace_sets_valid_block_values():
# check we haven't put a Series into any block.values
assert isinstance(df._mgr.blocks[0].values, Categorical)
- # smoketest for OP bug from GH#35731
- assert df.isnull().sum().sum() == 0
+ if not using_copy_on_write:
+ # smoketest for OP bug from GH#35731
+ assert df.isnull().sum().sum() == 0
def test_nonconsolidated_item_cache_take():
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 4c2e9b8530e81..6ad2f35bd2a6a 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -271,15 +271,22 @@ def test_constructor_dtype_copy(self):
new_df["col1"] = 200.0
assert orig_df["col1"][0] == 1.0
- def test_constructor_dtype_nocast_view_dataframe(self):
+ def test_constructor_dtype_nocast_view_dataframe(self, using_copy_on_write):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
- should_be_view[0][0] = 99
- assert df.values[0, 0] == 99
+ if using_copy_on_write:
+ # INFO(CoW) doesn't mutate original
+ should_be_view.iloc[0, 0] = 99
+ assert df.values[0, 0] == 1
+ else:
+ should_be_view[0][0] = 99
+ assert df.values[0, 0] == 99
- def test_constructor_dtype_nocast_view_2d_array(self, using_array_manager):
+ def test_constructor_dtype_nocast_view_2d_array(
+ self, using_array_manager, using_copy_on_write
+ ):
df = DataFrame([[1, 2], [3, 4]], dtype="int64")
- if not using_array_manager:
+ if not using_array_manager and not using_copy_on_write:
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
@@ -2523,7 +2530,13 @@ def test_constructor_list_str_na(self, string_dtype):
@pytest.mark.parametrize("copy", [False, True])
def test_dict_nocopy(
- self, request, copy, any_numeric_ea_dtype, any_numpy_dtype, using_array_manager
+ self,
+ request,
+ copy,
+ any_numeric_ea_dtype,
+ any_numpy_dtype,
+ using_array_manager,
+ using_copy_on_write,
):
if (
using_array_manager
@@ -2597,7 +2610,7 @@ def check_views(c_only: bool = False):
with tm.assert_produces_warning(FutureWarning, match="will attempt to set"):
df.iloc[:, 2] = pd.array([45, 46], dtype=c.dtype)
assert df.dtypes.iloc[2] == c.dtype
- if not copy:
+ if not copy and not using_copy_on_write:
check_views(True)
if copy:
@@ -2609,7 +2622,7 @@ def check_views(c_only: bool = False):
assert b[0] == b.dtype.type(3)
# FIXME(GH#35417): enable after GH#35417
assert c[0] == c_orig[0] # i.e. df.iloc[0, 2]=45 did *not* update c
- else:
+ elif not using_copy_on_write:
# TODO: we can call check_views if we stop consolidating
# in setitem_with_indexer
assert c[0] == 45 # i.e. df.iloc[0, 2]=45 *did* update c
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index 72e7e458b4e1f..2cf1cf0f15652 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -12,16 +12,20 @@
class TestPeriodIndex:
- def test_getitem_periodindex_duplicates_string_slice(self):
+ def test_getitem_periodindex_duplicates_string_slice(self, using_copy_on_write):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN")
ts = Series(np.random.randn(len(idx)), index=idx)
+ original = ts.copy()
result = ts["2007"]
expected = ts[1:3]
tm.assert_series_equal(result, expected)
result[:] = 1
- assert (ts[1:3] == 1).all()
+ if using_copy_on_write:
+ tm.assert_series_equal(ts, original)
+ else:
+ assert (ts[1:3] == 1).all()
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq="A-JUN")
diff --git a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
index 479cd9952f75b..2efb288a73f8d 100644
--- a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py
@@ -12,7 +12,7 @@
import pandas._testing as tm
-def test_detect_chained_assignment():
+def test_detect_chained_assignment(using_copy_on_write):
# Inplace ops, originally from:
# https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
a = [12, 23]
@@ -29,17 +29,21 @@ def test_detect_chained_assignment():
multiind = MultiIndex.from_tuples(tuples, names=["part", "side"])
zed = DataFrame(events, index=["a", "b"], columns=multiind)
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
zed["eyes"]["right"].fillna(value=555, inplace=True)
+ else:
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(SettingWithCopyError, match=msg):
+ zed["eyes"]["right"].fillna(value=555, inplace=True)
@td.skip_array_manager_invalid_test # with ArrayManager df.loc[0] is not a view
-def test_cache_updating():
+def test_cache_updating(using_copy_on_write):
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=["x", "y", "z"])
+ df_original = df.copy()
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
@@ -48,7 +52,10 @@ def test_cache_updating():
# but actually works, since everything is a view
df.loc[0]["z"].iloc[0] = 1.0
result = df.loc[(0, 0), "z"]
- assert result == 1
+ if using_copy_on_write:
+ assert result == df_original.loc[0, "z"]
+ else:
+ assert result == 1
# correct setting
df.loc[(0, 0), "z"] = 2
diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py
index a57b363c0a448..cface630c6647 100644
--- a/pandas/tests/indexing/multiindex/test_partial.py
+++ b/pandas/tests/indexing/multiindex/test_partial.py
@@ -122,7 +122,9 @@ def test_getitem_partial_column_select(self):
# TODO(ArrayManager) rewrite test to not use .values
# exp.loc[2000, 4].values[:] select multiple columns -> .values is not a view
@td.skip_array_manager_invalid_test
- def test_partial_set(self, multiindex_year_month_day_dataframe_random_data):
+ def test_partial_set(
+ self, multiindex_year_month_day_dataframe_random_data, using_copy_on_write
+ ):
# GH #397
ymd = multiindex_year_month_day_dataframe_random_data
df = ymd.copy()
@@ -132,7 +134,8 @@ def test_partial_set(self, multiindex_year_month_day_dataframe_random_data):
tm.assert_frame_equal(df, exp)
df["A"].loc[2000, 4] = 1
- exp["A"].loc[2000, 4].values[:] = 1
+ if not using_copy_on_write:
+ exp["A"].loc[2000, 4].values[:] = 1
tm.assert_frame_equal(df, exp)
df.loc[2000] = 5
@@ -141,7 +144,10 @@ def test_partial_set(self, multiindex_year_month_day_dataframe_random_data):
# this works...for now
df["A"].iloc[14] = 5
- assert df["A"].iloc[14] == 5
+ if using_copy_on_write:
+ df["A"].iloc[14] == exp["A"].iloc[14]
+ else:
+ assert df["A"].iloc[14] == 5
@pytest.mark.parametrize("dtype", [int, float])
def test_getitem_intkey_leading_level(
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index 20569061cfa4c..ac10a6d82dc89 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -196,7 +196,7 @@ def test_multiindex_assignment(self):
df.loc[4, "d"] = arr
tm.assert_series_equal(df.loc[4, "d"], Series(arr, index=[8, 10], name="d"))
- def test_multiindex_assignment_single_dtype(self, using_array_manager):
+ def test_multiindex_assignment_single_dtype(self, using_copy_on_write):
# GH3777 part 2b
# single dtype
arr = np.array([0.0, 1.0])
@@ -216,7 +216,8 @@ def test_multiindex_assignment_single_dtype(self, using_array_manager):
tm.assert_series_equal(result, exp)
# extra check for inplace-ness
- tm.assert_numpy_array_equal(view, exp.values)
+ if not using_copy_on_write:
+ tm.assert_numpy_array_equal(view, exp.values)
# arr + 0.5 cannot be cast losslessly to int, so we upcast
df.loc[4, "c"] = arr + 0.5
@@ -405,16 +406,23 @@ def test_setitem_change_dtype(self, multiindex_dataframe_random_data):
reindexed = dft.reindex(columns=[("foo", "two")])
tm.assert_series_equal(reindexed["foo", "two"], s > s.median())
- def test_set_column_scalar_with_loc(self, multiindex_dataframe_random_data):
+ def test_set_column_scalar_with_loc(
+ self, multiindex_dataframe_random_data, using_copy_on_write
+ ):
frame = multiindex_dataframe_random_data
subset = frame.index[[1, 4, 5]]
frame.loc[subset] = 99
assert (frame.loc[subset].values == 99).all()
+ frame_original = frame.copy()
col = frame["B"]
col[subset] = 97
- assert (frame.loc[subset, "B"] == 97).all()
+ if using_copy_on_write:
+ # chained setitem doesn't work with CoW
+ tm.assert_frame_equal(frame, frame_original)
+ else:
+ assert (frame.loc[subset, "B"] == 97).all()
def test_nonunique_assignment_1750(self):
df = DataFrame(
@@ -487,21 +495,32 @@ def test_frame_setitem_view_direct(multiindex_dataframe_random_data):
assert (df["foo"].values == 0).all()
-def test_frame_setitem_copy_raises(multiindex_dataframe_random_data):
+def test_frame_setitem_copy_raises(
+ multiindex_dataframe_random_data, using_copy_on_write
+):
# will raise/warn as its chained assignment
df = multiindex_dataframe_random_data.T
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # TODO(CoW) it would be nice if this could still warn/raise
df["foo"]["one"] = 2
+ else:
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df["foo"]["one"] = 2
-def test_frame_setitem_copy_no_write(multiindex_dataframe_random_data):
+def test_frame_setitem_copy_no_write(
+ multiindex_dataframe_random_data, using_copy_on_write
+):
frame = multiindex_dataframe_random_data.T
expected = frame
df = frame.copy()
- msg = "A value is trying to be set on a copy of a slice from a DataFrame"
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
df["foo"]["one"] = 2
+ else:
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df["foo"]["one"] = 2
result = df
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index adc001695579c..81914e1b8052f 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -32,7 +32,7 @@ def random_text(nobs=100):
class TestCaching:
- def test_slice_consolidate_invalidate_item_cache(self):
+ def test_slice_consolidate_invalidate_item_cache(self, using_copy_on_write):
# this is chained assignment, but will 'work'
with option_context("chained_assignment", None):
@@ -52,7 +52,11 @@ def test_slice_consolidate_invalidate_item_cache(self):
# Assignment to wrong series
df["bb"].iloc[0] = 0.17
df._clear_item_cache()
- tm.assert_almost_equal(df["bb"][0], 0.17)
+ if not using_copy_on_write:
+ tm.assert_almost_equal(df["bb"][0], 0.17)
+ else:
+ # with ArrayManager, parent is not mutated with chained assignment
+ tm.assert_almost_equal(df["bb"][0], 2.2)
@pytest.mark.parametrize("do_ref", [True, False])
def test_setitem_cache_updating(self, do_ref):
@@ -71,7 +75,7 @@ def test_setitem_cache_updating(self, do_ref):
assert df.loc[0, "c"] == 0.0
assert df.loc[7, "c"] == 1.0
- def test_setitem_cache_updating_slices(self):
+ def test_setitem_cache_updating_slices(self, using_copy_on_write):
# GH 7084
# not updating cache on series setting with slices
expected = DataFrame(
@@ -92,12 +96,17 @@ def test_setitem_cache_updating_slices(self):
# try via a chain indexing
# this actually works
out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014"))
+ out_original = out.copy()
for ix, row in df.iterrows():
v = out[row["C"]][six:eix] + row["D"]
out[row["C"]][six:eix] = v
- tm.assert_frame_equal(out, expected)
- tm.assert_series_equal(out["A"], expected["A"])
+ if not using_copy_on_write:
+ tm.assert_frame_equal(out, expected)
+ tm.assert_series_equal(out["A"], expected["A"])
+ else:
+ tm.assert_frame_equal(out, out_original)
+ tm.assert_series_equal(out["A"], out_original["A"])
out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014"))
for ix, row in df.iterrows():
@@ -123,7 +132,7 @@ def test_altering_series_clears_parent_cache(self):
class TestChaining:
- def test_setitem_chained_setfault(self):
+ def test_setitem_chained_setfault(self, using_copy_on_write):
# GH6026
data = ["right", "left", "left", "left", "right", "left", "timeout"]
@@ -132,24 +141,38 @@ def test_setitem_chained_setfault(self):
df = DataFrame({"response": np.array(data)})
mask = df.response == "timeout"
df.response[mask] = "none"
- tm.assert_frame_equal(df, DataFrame({"response": mdata}))
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, DataFrame({"response": data}))
+ else:
+ tm.assert_frame_equal(df, DataFrame({"response": mdata}))
recarray = np.rec.fromarrays([data], names=["response"])
df = DataFrame(recarray)
mask = df.response == "timeout"
df.response[mask] = "none"
- tm.assert_frame_equal(df, DataFrame({"response": mdata}))
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, DataFrame({"response": data}))
+ else:
+ tm.assert_frame_equal(df, DataFrame({"response": mdata}))
df = DataFrame({"response": data, "response1": data})
+ df_original = df.copy()
mask = df.response == "timeout"
df.response[mask] = "none"
- tm.assert_frame_equal(df, DataFrame({"response": mdata, "response1": data}))
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, df_original)
+ else:
+ tm.assert_frame_equal(df, DataFrame({"response": mdata, "response1": data}))
# GH 6056
expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]})
df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
df["A"].iloc[0] = np.nan
result = df.head()
+ if using_copy_on_write:
+ expected = DataFrame({"A": ["foo", "bar", "bah", "foo", "bar"]})
+ else:
+ expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]})
tm.assert_frame_equal(result, expected)
df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
@@ -158,7 +181,7 @@ def test_setitem_chained_setfault(self):
tm.assert_frame_equal(result, expected)
@pytest.mark.arm_slow
- def test_detect_chained_assignment(self):
+ def test_detect_chained_assignment(self, using_copy_on_write):
with option_context("chained_assignment", "raise"):
# work with the chain
@@ -166,14 +189,20 @@ def test_detect_chained_assignment(self):
df = DataFrame(
np.arange(4).reshape(2, 2), columns=list("AB"), dtype="int64"
)
+ df_original = df.copy()
assert df._is_copy is None
df["A"][0] = -5
df["A"][1] = -6
- tm.assert_frame_equal(df, expected)
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, df_original)
+ else:
+ tm.assert_frame_equal(df, expected)
@pytest.mark.arm_slow
- def test_detect_chained_assignment_raises(self, using_array_manager):
+ def test_detect_chained_assignment_raises(
+ self, using_array_manager, using_copy_on_write
+ ):
# test with the chaining
df = DataFrame(
@@ -182,9 +211,14 @@ def test_detect_chained_assignment_raises(self, using_array_manager):
"B": np.array(np.arange(2, 4), dtype=np.float64),
}
)
+ df_original = df.copy()
assert df._is_copy is None
- if not using_array_manager:
+ if using_copy_on_write:
+ df["A"][0] = -5
+ df["A"][1] = -6
+ tm.assert_frame_equal(df, df_original)
+ elif not using_array_manager:
with pytest.raises(SettingWithCopyError, match=msg):
df["A"][0] = -5
@@ -192,7 +226,6 @@ def test_detect_chained_assignment_raises(self, using_array_manager):
df["A"][1] = np.nan
assert df["A"]._is_copy is None
-
else:
# INFO(ArrayManager) for ArrayManager it doesn't matter that it's
# a mixed dataframe
@@ -203,7 +236,7 @@ def test_detect_chained_assignment_raises(self, using_array_manager):
tm.assert_frame_equal(df, expected)
@pytest.mark.arm_slow
- def test_detect_chained_assignment_fails(self):
+ def test_detect_chained_assignment_fails(self, using_copy_on_write):
# Using a copy (the chain), fails
df = DataFrame(
@@ -213,11 +246,15 @@ def test_detect_chained_assignment_fails(self):
}
)
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # TODO(CoW) can we still warn here?
df.loc[0]["A"] = -5
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df.loc[0]["A"] = -5
@pytest.mark.arm_slow
- def test_detect_chained_assignment_doc_example(self):
+ def test_detect_chained_assignment_doc_example(self, using_copy_on_write):
# Doc example
df = DataFrame(
@@ -228,30 +265,43 @@ def test_detect_chained_assignment_doc_example(self):
)
assert df._is_copy is None
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # TODO(CoW) can we still warn here?
indexer = df.a.str.startswith("o")
df[indexer]["c"] = 42
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ indexer = df.a.str.startswith("o")
+ df[indexer]["c"] = 42
@pytest.mark.arm_slow
- def test_detect_chained_assignment_object_dtype(self, using_array_manager):
+ def test_detect_chained_assignment_object_dtype(
+ self, using_array_manager, using_copy_on_write
+ ):
expected = DataFrame({"A": [111, "bbb", "ccc"], "B": [1, 2, 3]})
df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]})
+ df_original = df.copy()
- with pytest.raises(SettingWithCopyError, match=msg):
- df.loc[0]["A"] = 111
+ if not using_copy_on_write:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df.loc[0]["A"] = 111
- if not using_array_manager:
+ if using_copy_on_write:
+ # TODO(CoW) can we still warn here?
+ df["A"][0] = 111
+ tm.assert_frame_equal(df, df_original)
+ elif not using_array_manager:
with pytest.raises(SettingWithCopyError, match=msg):
df["A"][0] = 111
df.loc[0, "A"] = 111
+ tm.assert_frame_equal(df, expected)
else:
# INFO(ArrayManager) for ArrayManager it doesn't matter that it's
# a mixed dataframe
df["A"][0] = 111
-
- tm.assert_frame_equal(df, expected)
+ tm.assert_frame_equal(df, expected)
@pytest.mark.arm_slow
def test_detect_chained_assignment_is_copy_pickle(self):
@@ -299,8 +349,9 @@ def test_detect_chained_assignment_implicit_take(self):
df["letters"] = df["letters"].apply(str.lower)
@pytest.mark.arm_slow
- def test_detect_chained_assignment_implicit_take2(self):
-
+ def test_detect_chained_assignment_implicit_take2(self, using_copy_on_write):
+ if using_copy_on_write:
+ pytest.skip("_is_copy is not always set for CoW")
# Implicitly take 2
df = random_text(100000)
indexer = df.letters.apply(lambda x: len(x) > 10)
@@ -356,18 +407,26 @@ def test_detect_chained_assignment_false_positives(self):
str(df)
@pytest.mark.arm_slow
- def test_detect_chained_assignment_undefined_column(self):
+ def test_detect_chained_assignment_undefined_column(self, using_copy_on_write):
# from SO:
# https://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc
df = DataFrame(np.arange(0, 9), columns=["count"])
df["group"] = "b"
+ df_original = df.copy()
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # TODO(CoW) can we still warn here?
df.iloc[0:5]["group"] = "a"
+ tm.assert_frame_equal(df, df_original)
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df.iloc[0:5]["group"] = "a"
@pytest.mark.arm_slow
- def test_detect_chained_assignment_changing_dtype(self, using_array_manager):
+ def test_detect_chained_assignment_changing_dtype(
+ self, using_array_manager, using_copy_on_write
+ ):
# Mixed type setting but same dtype & changing dtype
df = DataFrame(
@@ -378,32 +437,45 @@ def test_detect_chained_assignment_changing_dtype(self, using_array_manager):
"D": ["a", "b", "c", "d", "e"],
}
)
+ df_original = df.copy()
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
df.loc[2]["D"] = "foo"
-
- with pytest.raises(SettingWithCopyError, match=msg):
df.loc[2]["C"] = "foo"
+ df["C"][2] = "foo"
+ tm.assert_frame_equal(df, df_original)
+
+ if not using_copy_on_write:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df.loc[2]["D"] = "foo"
- if not using_array_manager:
with pytest.raises(SettingWithCopyError, match=msg):
+ df.loc[2]["C"] = "foo"
+
+ if not using_array_manager:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df["C"][2] = "foo"
+ else:
+ # INFO(ArrayManager) for ArrayManager it doesn't matter if it's
+ # changing the dtype or not
df["C"][2] = "foo"
- else:
- # INFO(ArrayManager) for ArrayManager it doesn't matter if it's
- # changing the dtype or not
- df["C"][2] = "foo"
- assert df.loc[2, "C"] == "foo"
+ assert df.loc[2, "C"] == "foo"
- def test_setting_with_copy_bug(self):
+ def test_setting_with_copy_bug(self, using_copy_on_write):
# operating on a copy
df = DataFrame(
{"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]}
)
+ df_original = df.copy()
mask = pd.isna(df.c)
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
df[["c"]][mask] = df[["b"]][mask]
+ tm.assert_frame_equal(df, df_original)
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ df[["c"]][mask] = df[["b"]][mask]
def test_setting_with_copy_bug_no_warning(self):
# invalid warning as we are returning a new object
@@ -414,8 +486,12 @@ def test_setting_with_copy_bug_no_warning(self):
# this should not raise
df2["y"] = ["g", "h", "i"]
- def test_detect_chained_assignment_warnings_errors(self):
+ def test_detect_chained_assignment_warnings_errors(self, using_copy_on_write):
df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]})
+ if using_copy_on_write:
+ df.loc[0]["A"] = 111
+ return
+
with option_context("chained_assignment", "warn"):
with tm.assert_produces_warning(SettingWithCopyWarning):
df.loc[0]["A"] = 111
@@ -425,14 +501,23 @@ def test_detect_chained_assignment_warnings_errors(self):
df.loc[0]["A"] = 111
@pytest.mark.parametrize("rhs", [3, DataFrame({0: [1, 2, 3, 4]})])
- def test_detect_chained_assignment_warning_stacklevel(self, rhs):
+ def test_detect_chained_assignment_warning_stacklevel(
+ self, rhs, using_copy_on_write
+ ):
# GH#42570
df = DataFrame(np.arange(25).reshape(5, 5))
+ df_original = df.copy()
chained = df.loc[:3]
with option_context("chained_assignment", "warn"):
- with tm.assert_produces_warning(SettingWithCopyWarning) as t:
- chained[2] = rhs
- assert t[0].filename == __file__
+ if not using_copy_on_write:
+ with tm.assert_produces_warning(SettingWithCopyWarning) as t:
+ chained[2] = rhs
+ assert t[0].filename == __file__
+ else:
+ # INFO(CoW) no warning, and original dataframe not changed
+ with tm.assert_produces_warning(None):
+ chained[2] = rhs
+ tm.assert_frame_equal(df, df_original)
# TODO(ArrayManager) fast_xs with array-like scalars is not yet working
@td.skip_array_manager_not_yet_implemented
@@ -483,7 +568,7 @@ def test_cache_updating2(self):
expected = Series([0, 0, 0, 2, 0], name="f")
tm.assert_series_equal(df.f, expected)
- def test_iloc_setitem_chained_assignment(self):
+ def test_iloc_setitem_chained_assignment(self, using_copy_on_write):
# GH#3970
with option_context("chained_assignment", None):
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
@@ -497,7 +582,10 @@ def test_iloc_setitem_chained_assignment(self):
df.iloc[ck]
df["bb"].iloc[0] = 0.15
- assert df["bb"].iloc[0] == 0.15
+ if not using_copy_on_write:
+ assert df["bb"].iloc[0] == 0.15
+ else:
+ assert df["bb"].iloc[0] == 2.2
def test_getitem_loc_assignment_slice_state(self):
# GH 13569
diff --git a/pandas/tests/indexing/test_iat.py b/pandas/tests/indexing/test_iat.py
index 44bd51ee1b7d1..916303884df88 100644
--- a/pandas/tests/indexing/test_iat.py
+++ b/pandas/tests/indexing/test_iat.py
@@ -31,7 +31,7 @@ def test_iat_getitem_series_with_period_index():
assert expected == result
-def test_iat_setitem_item_cache_cleared(indexer_ial):
+def test_iat_setitem_item_cache_cleared(indexer_ial, using_copy_on_write):
# GH#45684
data = {"x": np.arange(8, dtype=np.int64), "y": np.int64(0)}
df = DataFrame(data).copy()
@@ -44,5 +44,6 @@ def test_iat_setitem_item_cache_cleared(indexer_ial):
indexer_ial(df)[7, 1] = 1234
assert df.iat[7, 1] == 1234
- assert ser.iloc[-1] == 1234
+ if not using_copy_on_write:
+ assert ser.iloc[-1] == 1234
assert df.iloc[-1, -1] == 1234
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index fdf741040407f..8cc6b6e73aaea 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -123,7 +123,7 @@ def test_iloc_setitem_ea_inplace(self, frame_or_series, box):
if frame_or_series is Series:
values = obj.values
else:
- values = obj[0].values
+ values = obj._mgr.arrays[0]
if frame_or_series is Series:
obj.iloc[:2] = box(arr[2:])
@@ -843,7 +843,9 @@ def test_iloc_empty_list_indexer_is_ok(self):
df.iloc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True
)
- def test_identity_slice_returns_new_object(self, using_array_manager, request):
+ def test_identity_slice_returns_new_object(
+ self, using_array_manager, using_copy_on_write, request
+ ):
# GH13873
if using_array_manager:
mark = pytest.mark.xfail(
@@ -859,8 +861,12 @@ def test_identity_slice_returns_new_object(self, using_array_manager, request):
assert np.shares_memory(original_df["a"], sliced_df["a"])
# Setting using .loc[:, "a"] sets inplace so alters both sliced and orig
+ # depending on CoW
original_df.loc[:, "a"] = [4, 4, 4]
- assert (sliced_df["a"] == 4).all()
+ if using_copy_on_write:
+ assert (sliced_df["a"] == [1, 2, 3]).all()
+ else:
+ assert (sliced_df["a"] == 4).all()
original_series = Series([1, 2, 3, 4, 5, 6])
sliced_series = original_series.iloc[:]
@@ -868,7 +874,11 @@ def test_identity_slice_returns_new_object(self, using_array_manager, request):
# should also be a shallow copy
original_series[:3] = [7, 8, 9]
- assert all(sliced_series[:3] == [7, 8, 9])
+ if using_copy_on_write:
+ # shallow copy not updated (CoW)
+ assert all(sliced_series[:3] == [1, 2, 3])
+ else:
+ assert all(sliced_series[:3] == [7, 8, 9])
def test_indexing_zerodim_np_array(self):
# GH24919
@@ -884,9 +894,10 @@ def test_series_indexing_zerodim_np_array(self):
assert result == 1
@td.skip_array_manager_not_yet_implemented
- def test_iloc_setitem_categorical_updates_inplace(self):
+ def test_iloc_setitem_categorical_updates_inplace(self, using_copy_on_write):
# Mixed dtype ensures we go through take_split_path in setitem_with_indexer
cat = Categorical(["A", "B", "C"])
+ cat_original = cat.copy()
df = DataFrame({1: cat, 2: [1, 2, 3]}, copy=False)
assert tm.shares_memory(df[1], cat)
@@ -895,9 +906,13 @@ def test_iloc_setitem_categorical_updates_inplace(self):
msg = "will attempt to set the values inplace instead"
with tm.assert_produces_warning(FutureWarning, match=msg):
df.iloc[:, 0] = cat[::-1]
- assert tm.shares_memory(df[1], cat)
- expected = Categorical(["C", "B", "A"], categories=["A", "B", "C"])
+ if not using_copy_on_write:
+ assert tm.shares_memory(df[1], cat)
+ expected = Categorical(["C", "B", "A"], categories=["A", "B", "C"])
+ else:
+ expected = cat_original
+
tm.assert_categorical_equal(cat, expected)
def test_iloc_with_boolean_operation(self):
@@ -1395,8 +1410,9 @@ def test_frame_iloc_setitem_callable(self):
class TestILocSeries:
- def test_iloc(self):
+ def test_iloc(self, using_copy_on_write):
ser = Series(np.random.randn(10), index=list(range(0, 20, 2)))
+ ser_original = ser.copy()
for i in range(len(ser)):
result = ser.iloc[i]
@@ -1412,7 +1428,10 @@ def test_iloc(self):
with tm.assert_produces_warning(None):
# GH#45324 make sure we aren't giving a spurious FutureWarning
result[:] = 0
- assert (ser.iloc[1:3] == 0).all()
+ if using_copy_on_write:
+ tm.assert_series_equal(ser, ser_original)
+ else:
+ assert (ser.iloc[1:3] == 0).all()
# list of integers
result = ser.iloc[[0, 2, 3, 4, 5]]
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index a7c03c672be58..b13177ad940eb 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1066,7 +1066,9 @@ def test_loc_empty_list_indexer_is_ok(self):
df.loc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True
)
- def test_identity_slice_returns_new_object(self, using_array_manager, request):
+ def test_identity_slice_returns_new_object(
+ self, using_array_manager, request, using_copy_on_write
+ ):
# GH13873
if using_array_manager:
mark = pytest.mark.xfail(
@@ -1083,8 +1085,12 @@ def test_identity_slice_returns_new_object(self, using_array_manager, request):
assert np.shares_memory(original_df["a"]._values, sliced_df["a"]._values)
# Setting using .loc[:, "a"] sets inplace so alters both sliced and orig
+ # depending on CoW
original_df.loc[:, "a"] = [4, 4, 4]
- assert (sliced_df["a"] == 4).all()
+ if using_copy_on_write:
+ assert (sliced_df["a"] == [1, 2, 3]).all()
+ else:
+ assert (sliced_df["a"] == 4).all()
# These should not return copies
assert original_df is original_df.loc[:, :]
@@ -1098,7 +1104,10 @@ def test_identity_slice_returns_new_object(self, using_array_manager, request):
assert original_series[:] is not original_series
original_series[:3] = [7, 8, 9]
- assert all(sliced_series[:3] == [7, 8, 9])
+ if using_copy_on_write:
+ assert all(sliced_series[:3] == [1, 2, 3])
+ else:
+ assert all(sliced_series[:3] == [7, 8, 9])
@pytest.mark.xfail(reason="accidental fix reverted - GH37497")
def test_loc_copy_vs_view(self):
@@ -2558,7 +2567,7 @@ def test_loc_setitem_boolean_and_column(self, float_frame):
tm.assert_frame_equal(float_frame, expected)
- def test_loc_setitem_ndframe_values_alignment(self):
+ def test_loc_setitem_ndframe_values_alignment(self, using_copy_on_write):
# GH#45501
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
df.loc[[False, False, True], ["a"]] = DataFrame(
@@ -2579,9 +2588,13 @@ def test_loc_setitem_ndframe_values_alignment(self):
tm.assert_frame_equal(df, expected)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+ df_orig = df.copy()
ser = df["a"]
ser.loc[[False, False, True]] = Series([10, 11, 12], index=[2, 1, 0])
- tm.assert_frame_equal(df, expected)
+ if using_copy_on_write:
+ tm.assert_frame_equal(df, df_orig)
+ else:
+ tm.assert_frame_equal(df, expected)
class TestLocListlike:
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 2f3b569c899e1..e4f3b8847238f 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -744,7 +744,7 @@ def test_reindex_items(self):
mgr.iget(3).internal_values(), reindexed.iget(3).internal_values()
)
- def test_get_numeric_data(self):
+ def test_get_numeric_data(self, using_copy_on_write):
mgr = create_mgr(
"int: int; float: float; complex: complex;"
"str: object; bool: bool; obj: object; dt: datetime",
@@ -765,10 +765,16 @@ def test_get_numeric_data(self):
np.array([100.0, 200.0, 300.0]),
inplace=True,
)
- tm.assert_almost_equal(
- mgr.iget(mgr.items.get_loc("float")).internal_values(),
- np.array([100.0, 200.0, 300.0]),
- )
+ if using_copy_on_write:
+ tm.assert_almost_equal(
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
+ np.array([1.0, 1.0, 1.0]),
+ )
+ else:
+ tm.assert_almost_equal(
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
+ np.array([100.0, 200.0, 300.0]),
+ )
numeric2 = mgr.get_numeric_data(copy=True)
tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"]))
@@ -777,12 +783,18 @@ def test_get_numeric_data(self):
np.array([1000.0, 2000.0, 3000.0]),
inplace=True,
)
- tm.assert_almost_equal(
- mgr.iget(mgr.items.get_loc("float")).internal_values(),
- np.array([100.0, 200.0, 300.0]),
- )
+ if using_copy_on_write:
+ tm.assert_almost_equal(
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
+ np.array([1.0, 1.0, 1.0]),
+ )
+ else:
+ tm.assert_almost_equal(
+ mgr.iget(mgr.items.get_loc("float")).internal_values(),
+ np.array([100.0, 200.0, 300.0]),
+ )
- def test_get_bool_data(self):
+ def test_get_bool_data(self, using_copy_on_write):
msg = "object-dtype columns with all-bool values"
mgr = create_mgr(
"int: int; float: float; complex: complex;"
@@ -800,19 +812,31 @@ def test_get_bool_data(self):
)
bools.iset(0, np.array([True, False, True]), inplace=True)
- tm.assert_numpy_array_equal(
- mgr.iget(mgr.items.get_loc("bool")).internal_values(),
- np.array([True, False, True]),
- )
+ if using_copy_on_write:
+ tm.assert_numpy_array_equal(
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
+ np.array([True, True, True]),
+ )
+ else:
+ tm.assert_numpy_array_equal(
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
+ np.array([True, False, True]),
+ )
# Check sharing
with tm.assert_produces_warning(FutureWarning, match=msg):
bools2 = mgr.get_bool_data(copy=True)
bools2.iset(0, np.array([False, True, False]))
- tm.assert_numpy_array_equal(
- mgr.iget(mgr.items.get_loc("bool")).internal_values(),
- np.array([True, False, True]),
- )
+ if using_copy_on_write:
+ tm.assert_numpy_array_equal(
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
+ np.array([True, True, True]),
+ )
+ else:
+ tm.assert_numpy_array_equal(
+ mgr.iget(mgr.items.get_loc("bool")).internal_values(),
+ np.array([True, False, True]),
+ )
def test_unicode_repr_doesnt_raise(self):
repr(create_mgr("b,\u05d0: object"))
diff --git a/pandas/tests/reshape/test_from_dummies.py b/pandas/tests/reshape/test_from_dummies.py
index c52331e54f95e..ab80473725288 100644
--- a/pandas/tests/reshape/test_from_dummies.py
+++ b/pandas/tests/reshape/test_from_dummies.py
@@ -164,7 +164,7 @@ def test_error_with_prefix_default_category_dict_not_complete(
def test_error_with_prefix_contains_nan(dummies_basic):
- dummies_basic["col2_c"][2] = np.nan
+ dummies_basic.loc[2, "col2_c"] = np.nan
with pytest.raises(
ValueError, match=r"Dummy DataFrame contains NA value in column: 'col2_c'"
):
@@ -172,7 +172,7 @@ def test_error_with_prefix_contains_nan(dummies_basic):
def test_error_with_prefix_contains_non_dummies(dummies_basic):
- dummies_basic["col2_c"][2] = "str"
+ dummies_basic.loc[2, "col2_c"] = "str"
with pytest.raises(TypeError, match=r"Passed DataFrame contains non-dummy data"):
from_dummies(dummies_basic, sep="_")
diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py
index afb6d0f19daca..adee227c0f0ac 100644
--- a/pandas/tests/series/accessors/test_dt_accessor.py
+++ b/pandas/tests/series/accessors/test_dt_accessor.py
@@ -279,7 +279,7 @@ def test_dt_accessor_ambiguous_freq_conversions(self):
expected = Series(exp_values, name="xxx")
tm.assert_series_equal(ser, expected)
- def test_dt_accessor_not_writeable(self):
+ def test_dt_accessor_not_writeable(self, using_copy_on_write):
# no setting allowed
ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx")
with pytest.raises(ValueError, match="modifications"):
@@ -288,8 +288,12 @@ def test_dt_accessor_not_writeable(self):
# trying to set a copy
msg = "modifications to a property of a datetimelike.+not supported"
with pd.option_context("chained_assignment", "raise"):
- with pytest.raises(SettingWithCopyError, match=msg):
+ if using_copy_on_write:
+ # TODO(CoW) it would be nice to keep a warning/error for this case
ser.dt.hour[0] = 5
+ else:
+ with pytest.raises(SettingWithCopyError, match=msg):
+ ser.dt.hour[0] = 5
@pytest.mark.parametrize(
"method, dates",
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 7a3e18c64f366..6ddffd0d006dc 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -209,7 +209,8 @@ def test_basic_getitem_setitem_corner(datetime_series):
datetime_series[[5, slice(None, None)]] = 2
-def test_slice(string_series, object_series):
+def test_slice(string_series, object_series, using_copy_on_write):
+ original = string_series.copy()
numSlice = string_series[10:20]
numSliceEnd = string_series[-10:]
objSlice = object_series[10:20]
@@ -227,7 +228,11 @@ def test_slice(string_series, object_series):
sl = string_series[10:20]
sl[:] = 0
- assert (string_series[10:20] == 0).all()
+ if using_copy_on_write:
+ # Doesn't modify parent (CoW)
+ tm.assert_series_equal(string_series, original)
+ else:
+ assert (string_series[10:20] == 0).all()
def test_timedelta_assignment():
@@ -244,7 +249,7 @@ def test_timedelta_assignment():
tm.assert_series_equal(s, expected)
-def test_underlying_data_conversion():
+def test_underlying_data_conversion(using_copy_on_write):
# GH 4080
df = DataFrame({c: [1, 2, 3] for c in ["a", "b", "c"]})
msg = "The 'inplace' keyword"
@@ -253,15 +258,19 @@ def test_underlying_data_conversion():
assert return_value is None
s = Series([1], index=[(2, 2, 2)])
df["val"] = 0
+ df_original = df.copy()
df
df["val"].update(s)
- expected = DataFrame(
- {"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3], "val": [0, 1, 0]}
- )
- with tm.assert_produces_warning(FutureWarning, match=msg):
- return_value = expected.set_index(["a", "b", "c"], inplace=True)
- assert return_value is None
+ if using_copy_on_write:
+ expected = df_original
+ else:
+ expected = DataFrame(
+ {"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3], "val": [0, 1, 0]}
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ return_value = expected.set_index(["a", "b", "c"], inplace=True)
+ assert return_value is None
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/series/methods/test_copy.py b/pandas/tests/series/methods/test_copy.py
index 8aa5c14812dc0..d681c0d02e0a2 100644
--- a/pandas/tests/series/methods/test_copy.py
+++ b/pandas/tests/series/methods/test_copy.py
@@ -9,20 +9,28 @@
class TestCopy:
- @pytest.mark.parametrize("deep", [None, False, True])
- def test_copy(self, deep):
+ @pytest.mark.parametrize("deep", ["default", None, False, True])
+ def test_copy(self, deep, using_copy_on_write):
ser = Series(np.arange(10), dtype="float64")
# default deep is True
- if deep is None:
+ if deep == "default":
ser2 = ser.copy()
else:
ser2 = ser.copy(deep=deep)
+ if using_copy_on_write:
+ # INFO(CoW) a shallow copy doesn't yet copy the data
+ # but parent will not be modified (CoW)
+ if deep is None or deep is False:
+ assert np.may_share_memory(ser.values, ser2.values)
+ else:
+ assert not np.may_share_memory(ser.values, ser2.values)
+
ser2[::2] = np.NaN
- if deep is None or deep is True:
+ if deep is not False or using_copy_on_write:
# Did not modify original Series
assert np.isnan(ser2[0])
assert not np.isnan(ser[0])
@@ -31,8 +39,8 @@ def test_copy(self, deep):
assert np.isnan(ser2[0])
assert np.isnan(ser[0])
- @pytest.mark.parametrize("deep", [None, False, True])
- def test_copy_tzaware(self, deep):
+ @pytest.mark.parametrize("deep", ["default", None, False, True])
+ def test_copy_tzaware(self, deep, using_copy_on_write):
# GH#11794
# copy of tz-aware
expected = Series([Timestamp("2012/01/01", tz="UTC")])
@@ -40,15 +48,23 @@ def test_copy_tzaware(self, deep):
ser = Series([Timestamp("2012/01/01", tz="UTC")])
- if deep is None:
+ if deep == "default":
ser2 = ser.copy()
else:
ser2 = ser.copy(deep=deep)
+ if using_copy_on_write:
+ # INFO(CoW) a shallow copy doesn't yet copy the data
+ # but parent will not be modified (CoW)
+ if deep is None or deep is False:
+ assert np.may_share_memory(ser.values, ser2.values)
+ else:
+ assert not np.may_share_memory(ser.values, ser2.values)
+
ser2[0] = Timestamp("1999/01/01", tz="UTC")
# default deep is True
- if deep is None or deep is True:
+ if deep is not False or using_copy_on_write:
# Did not modify original Series
tm.assert_series_equal(ser2, expected2)
tm.assert_series_equal(ser, expected)
diff --git a/pandas/tests/series/methods/test_get_numeric_data.py b/pandas/tests/series/methods/test_get_numeric_data.py
index e386f4b5b1dec..60dd64d7e1948 100644
--- a/pandas/tests/series/methods/test_get_numeric_data.py
+++ b/pandas/tests/series/methods/test_get_numeric_data.py
@@ -7,13 +7,20 @@
class TestGetNumericData:
- def test_get_numeric_data_preserve_dtype(self):
+ def test_get_numeric_data_preserve_dtype(self, using_copy_on_write):
# get the numeric data
obj = Series([1, 2, 3])
result = obj._get_numeric_data()
tm.assert_series_equal(result, obj)
+ # returned object is a shallow copy
+ result.iloc[0] = 0
+ if using_copy_on_write:
+ assert obj.iloc[0] == 1
+ else:
+ assert obj.iloc[0] == 0
+
obj = Series([1, "2", 3.0])
result = obj._get_numeric_data()
expected = Series([], dtype=object, index=Index([], dtype=object))
diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py
index 729c07b8bdde7..d0392929cb082 100644
--- a/pandas/tests/series/methods/test_rename.py
+++ b/pandas/tests/series/methods/test_rename.py
@@ -143,10 +143,15 @@ def test_rename_error_arg(self):
with pytest.raises(KeyError, match=match):
ser.rename({2: 9}, errors="raise")
- def test_rename_copy_false(self):
+ def test_rename_copy_false(self, using_copy_on_write):
# GH 46889
ser = Series(["foo", "bar"])
+ ser_orig = ser.copy()
shallow_copy = ser.rename({1: 9}, copy=False)
ser[0] = "foobar"
- assert ser[0] == shallow_copy[0]
- assert ser[1] == shallow_copy[9]
+ if using_copy_on_write:
+ assert ser_orig[0] == shallow_copy[0]
+ assert ser_orig[1] == shallow_copy[9]
+ else:
+ assert ser[0] == shallow_copy[0]
+ assert ser[1] == shallow_copy[9]
diff --git a/pandas/tests/series/methods/test_update.py b/pandas/tests/series/methods/test_update.py
index d9d6641d54237..6403fcf76122a 100644
--- a/pandas/tests/series/methods/test_update.py
+++ b/pandas/tests/series/methods/test_update.py
@@ -14,7 +14,7 @@
class TestUpdate:
- def test_update(self):
+ def test_update(self, using_copy_on_write):
s = Series([1.5, np.nan, 3.0, 4.0, np.nan])
s2 = Series([np.nan, 3.5, np.nan, 5.0])
s.update(s2)
@@ -25,11 +25,15 @@ def test_update(self):
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
+ df_orig = df.copy()
df["c"].update(Series(["foo"], index=[0]))
- expected = DataFrame(
- [[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
- )
+ if using_copy_on_write:
+ expected = df_orig
+ else:
+ expected = DataFrame(
+ [[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
+ )
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
| This is a port of the proof of concept using the ArrayManager in https://github.com/pandas-dev/pandas/pull/41878 to the default BlockManager.
This PR is a start to implement the proposal described in more detail in https://docs.google.com/document/d/1ZCQ9mx3LBMy-nhwRl33_jgcvWo9IWdEfxDNQ2thyTb0/edit / discussed in https://github.com/pandas-dev/pandas/issues/36195
A very brief summary of the behaviour you get:
- *Any* subset (so also a slice, single column access, etc) _behaves_ as a copy (using CoW, or is already a copy)
- DataFrame methods that return a new DataFrame return shallow copies (using CoW) if applicable (for now, this is only implemented / tested for `reset_index` and `rename`, needs to be expanded to other methods)
**Implementation approach**
This PR adds Copy-on-Write (CoW) functionality to the DataFrame/Series at the BlockManager level. It does this by adding a new `.refs` attribute to the `BlockManager` that, if populated, keeps a list of `weakref` references to the blocks it shares data with (so for the BlockManager, this reference tracking is done per block, so `len(mgr.blocks) == len(mgr.refs)`).
This ensures that if we are modifying a block of a child manager, we can check if it is referencing (viewing) another block, and if needed do a copy on write. And also if we are modifying a block of a parent manager, we can check if that block is being referenced by another manager and if needed do a copy on write in this parent frame. (of course, a manager can both be parent and child at the same time, so those two checks always happen both)
---
**How to enable this new behaviour?**
Currently this PR simply enabled the new behaviour with CoW, but of course that will need to be turned off before merging (which also means that some of the changes will need to put behind a feature flag. I only did that now in some places).
I think that ideally, (on the short term) users have a way to enable the future behaviour (eg using an option), but _also_ have a way to enable additional warnings.
I already started adding an option, currently the boolean flag `options.mode.copy_on_write=True|False`:
* Do we have a better name? I personally don't like that it uses "copy_on_write", because this is the internal implementation detail, and not what most end users really have to care about. But something like "new_copy_view_behaviour" is also not super ..
* In addition to True/False, we can probably add "warn" as a third option, which gives warnings in cases where behaviour would change.
---
Some notes:
- Not everything is already implemented (there are a couple of `TODO(CoW)` in the code), although the majority for indexing / setitem is done.
- This PR does not yet try to tackle copy/view behaviour for the constructors, or for numpy array access (`.values`). Given the size of this PR already, those can probably be done in separate PRs?
- Most tests are already passing (with changes), but still need to fix a few tests outside of /indexing
- We will also need to think about a way to test this (in a similar way as the ArrayManager with an environment variable?)
I will also pull out some of the changes in separate PRs (eg the new test file could already be discussed/reviewed separately (-> https://github.com/pandas-dev/pandas/pull/46979), and the `column_setitem` is maybe also something that could be done as pre-cursor(-> https://github.com/pandas-dev/pandas/pull/47074)) | https://api.github.com/repos/pandas-dev/pandas/pulls/46958 | 2022-05-06T21:54:32Z | 2022-08-20T18:45:23Z | 2022-08-20T18:45:23Z | 2022-10-07T08:43:02Z |
DOC: fix cookbook groupby & transform example | diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index d0b2119f9d315..daf5a0e481b8e 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -511,7 +511,7 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
def replace(g):
mask = g < 0
- return g.where(mask, g[~mask].mean())
+ return g.where(~mask, g[~mask].mean())
gb.transform(replace)
| In the example, all negative values of a group should be replaced by the mean of the rest of the group. The linked stackoverflow output has it right: https://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means
We have to negate the first argument of `DataFrame.where` here, since this should be the condition when the value stays the same (and this should be the case for `g>=0`.)
pandas `df.where` behavior is pretty weird when coming from Spark world :)
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46957 | 2022-05-06T20:24:27Z | 2022-05-07T02:24:07Z | 2022-05-07T02:24:07Z | 2022-05-07T02:24:15Z |
CLN: Remove special case for rank in groupby.ops | diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 03f318d08d8cb..7f5fe85e07f40 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -1325,8 +1325,8 @@ def group_rank(
mask=sub_mask,
)
for i in range(len(result)):
- # TODO: why can't we do out[:, k] = result?
- out[i, k] = result[i]
+ if labels[i] >= 0:
+ out[i, k] = result[i]
# ----------------------------------------------------------------------
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index a769c92e0b542..7285824f0111f 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -623,10 +623,6 @@ def _call_cython_op(
result = result.T
- if self.how == "rank" and self.has_dropped_na:
- # TODO: Wouldn't need this if group_rank supported mask
- result = np.where(comp_ids < 0, np.nan, result)
-
if self.how not in self.cast_blocklist:
# e.g. if we are int64 and need to restore to datetime64/timedelta64
# "rank" is the only member of cast_blocklist we get here
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46953 | 2022-05-06T02:46:48Z | 2022-05-06T17:02:37Z | 2022-05-06T17:02:37Z | 2022-05-06T17:06:08Z |
CI/DOC: Fix to_hdf docstring validation | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c7013cb95f670..e1459a66a0f12 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2601,11 +2601,6 @@ def to_hdf(
followed by fallback to "fixed".
index : bool, default True
Write DataFrame index as a column.
- errors : str, default 'strict'
- Specifies how encoding and decoding errors are to be handled.
- See the errors argument for :func:`open` for a full list
- of options.
- encoding : str, default "UTF-8"
min_itemsize : dict or int, optional
Map column names to minimum string sizes for columns.
nan_rep : Any, optional
@@ -2616,8 +2611,15 @@ def to_hdf(
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
- of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
+ of the object are indexed. See
+ :ref:`Query via data columns<io.hdf5-query-data-columns>`. for
+ more information.
Applicable only to format='table'.
+ errors : str, default 'strict'
+ Specifies how encoding and decoding errors are to be handled.
+ See the errors argument for :func:`open` for a full list
+ of options.
+ encoding : str, default "UTF-8"
See Also
--------
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/46949 | 2022-05-05T21:26:14Z | 2022-05-06T02:23:50Z | 2022-05-06T02:23:50Z | 2022-05-06T04:18:47Z |
PERF: Remove unnecessary asof join functions | diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index 9238d36e0ee16..cc7d863bf326c 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -839,11 +839,16 @@ def asof_join_nearest_on_X_by_Y(numeric_t[:] left_values,
by_t[:] left_by_values,
by_t[:] right_by_values,
bint allow_exact_matches=True,
- tolerance=None):
+ tolerance=None,
+ bint use_hashtable=True):
cdef:
ndarray[intp_t] bli, bri, fli, fri
+ ndarray[intp_t] left_indexer, right_indexer
+ Py_ssize_t left_size, i
+ numeric_t bdiff, fdiff
+
# search both forward and backward
bli, bri = asof_join_backward_on_X_by_Y(
left_values,
@@ -852,6 +857,7 @@ def asof_join_nearest_on_X_by_Y(numeric_t[:] left_values,
right_by_values,
allow_exact_matches,
tolerance,
+ use_hashtable
)
fli, fri = asof_join_forward_on_X_by_Y(
left_values,
@@ -860,26 +866,11 @@ def asof_join_nearest_on_X_by_Y(numeric_t[:] left_values,
right_by_values,
allow_exact_matches,
tolerance,
+ use_hashtable
)
- return _choose_smaller_timestamp(left_values, right_values, bli, bri, fli, fri)
-
-
-cdef _choose_smaller_timestamp(
- numeric_t[:] left_values,
- numeric_t[:] right_values,
- ndarray[intp_t] bli,
- ndarray[intp_t] bri,
- ndarray[intp_t] fli,
- ndarray[intp_t] fri,
-):
- cdef:
- ndarray[intp_t] left_indexer, right_indexer
- Py_ssize_t left_size, i
- numeric_t bdiff, fdiff
-
+ # choose the smaller timestamp
left_size = len(left_values)
-
left_indexer = np.empty(left_size, dtype=np.intp)
right_indexer = np.empty(left_size, dtype=np.intp)
@@ -894,55 +885,3 @@ cdef _choose_smaller_timestamp(
left_indexer[i] = bli[i]
return left_indexer, right_indexer
-
-
-# ----------------------------------------------------------------------
-# asof_join
-# ----------------------------------------------------------------------
-
-def asof_join_backward(numeric_t[:] left_values,
- numeric_t[:] right_values,
- bint allow_exact_matches=True,
- tolerance=None):
-
- return asof_join_backward_on_X_by_Y(
- left_values,
- right_values,
- None,
- None,
- allow_exact_matches=allow_exact_matches,
- tolerance=tolerance,
- use_hashtable=False,
- )
-
-
-def asof_join_forward(numeric_t[:] left_values,
- numeric_t[:] right_values,
- bint allow_exact_matches=True,
- tolerance=None):
- return asof_join_forward_on_X_by_Y(
- left_values,
- right_values,
- None,
- None,
- allow_exact_matches=allow_exact_matches,
- tolerance=tolerance,
- use_hashtable=False,
- )
-
-
-def asof_join_nearest(numeric_t[:] left_values,
- numeric_t[:] right_values,
- bint allow_exact_matches=True,
- tolerance=None):
-
- cdef:
- ndarray[intp_t] bli, bri, fli, fri
-
- # search both forward and backward
- bli, bri = asof_join_backward(left_values, right_values,
- allow_exact_matches, tolerance)
- fli, fri = asof_join_forward(left_values, right_values,
- allow_exact_matches, tolerance)
-
- return _choose_smaller_timestamp(left_values, right_values, bli, bri, fli, fri)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index fbcf8a88d2fee..4227d43c459d0 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1691,11 +1691,6 @@ def get_result(self) -> DataFrame:
return result
-def _asof_function(direction: str):
- name = f"asof_join_{direction}"
- return getattr(libjoin, name, None)
-
-
def _asof_by_function(direction: str):
name = f"asof_join_{direction}_on_X_by_Y"
return getattr(libjoin, name, None)
@@ -2017,8 +2012,16 @@ def injection(obj):
)
else:
# choose appropriate function by type
- func = _asof_function(self.direction)
- return func(left_values, right_values, self.allow_exact_matches, tolerance)
+ func = _asof_by_function(self.direction)
+ return func(
+ left_values,
+ right_values,
+ None,
+ None,
+ self.allow_exact_matches,
+ tolerance,
+ False,
+ )
def _get_multiindex_indexer(
| Several functions in the join cython are module are basically just calling others, and can probably be removed. This wouldn't
be a big deal inside python, but in this case it cuts down considerably on the amount of generated cython code.
The tests pass for me locally when I do this and there seems to be significant space savings. | https://api.github.com/repos/pandas-dev/pandas/pulls/46943 | 2022-05-04T20:11:58Z | 2022-05-06T21:29:50Z | 2022-05-06T21:29:50Z | 2022-09-03T16:04:41Z |
TYP: narrow type bounds on extract_array | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 393eb2997f6f0..888e943488953 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -453,30 +453,34 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> npt.NDArray[np.bool_]:
else:
values = extract_array(values, extract_numpy=True, extract_range=True)
- comps = _ensure_arraylike(comps)
- comps = extract_array(comps, extract_numpy=True)
- if not isinstance(comps, np.ndarray):
+ comps_array = _ensure_arraylike(comps)
+ comps_array = extract_array(comps_array, extract_numpy=True)
+ if not isinstance(comps_array, np.ndarray):
# i.e. Extension Array
- return comps.isin(values)
+ return comps_array.isin(values)
- elif needs_i8_conversion(comps.dtype):
+ elif needs_i8_conversion(comps_array.dtype):
# Dispatch to DatetimeLikeArrayMixin.isin
- return pd_array(comps).isin(values)
- elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps.dtype):
- # e.g. comps are integers and values are datetime64s
- return np.zeros(comps.shape, dtype=bool)
+ return pd_array(comps_array).isin(values)
+ elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps_array.dtype):
+ # e.g. comps_array are integers and values are datetime64s
+ return np.zeros(comps_array.shape, dtype=bool)
# TODO: not quite right ... Sparse/Categorical
elif needs_i8_conversion(values.dtype):
- return isin(comps, values.astype(object))
+ return isin(comps_array, values.astype(object))
elif isinstance(values.dtype, ExtensionDtype):
- return isin(np.asarray(comps), np.asarray(values))
+ return isin(np.asarray(comps_array), np.asarray(values))
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
# Albeit hashmap has O(1) look-up (vs. O(logn) in sorted array),
# in1d is faster for small sizes
- if len(comps) > 1_000_000 and len(values) <= 26 and not is_object_dtype(comps):
+ if (
+ len(comps_array) > 1_000_000
+ and len(values) <= 26
+ and not is_object_dtype(comps_array)
+ ):
# If the values include nan we need to check for nan explicitly
# since np.nan it not equal to np.nan
if isna(values).any():
@@ -488,12 +492,12 @@ def f(c, v):
f = np.in1d
else:
- common = np.find_common_type([values.dtype, comps.dtype], [])
+ common = np.find_common_type([values.dtype, comps_array.dtype], [])
values = values.astype(common, copy=False)
- comps = comps.astype(common, copy=False)
+ comps_array = comps_array.astype(common, copy=False)
f = htable.ismember
- return f(comps, values)
+ return f(comps_array, values)
def factorize_array(
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 01a04b7aa63d9..4c8d3db7b4672 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -447,9 +447,7 @@ def __init__(
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
- # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
- # attribute "_codes"
- old_codes = extract_array(values)._codes # type: ignore[union-attr]
+ old_codes = extract_array(values)._codes
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories, copy=copy
)
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 9ced8f225c3a8..1930580b63b79 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1235,13 +1235,7 @@ def _addsub_object_array(self, other: np.ndarray, op):
res_values = op(self.astype("O"), np.asarray(other))
result = pd_array(res_values.ravel())
- # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no attribute
- # "reshape"
- result = extract_array(
- result, extract_numpy=True
- ).reshape( # type: ignore[union-attr]
- self.shape
- )
+ result = extract_array(result, extract_numpy=True).reshape(self.shape)
return result
def _time_shift(
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 17cdf6665aa99..434302b39fef9 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -9,8 +9,11 @@
from typing import (
TYPE_CHECKING,
Any,
+ Optional,
Sequence,
+ Union,
cast,
+ overload,
)
import warnings
@@ -18,11 +21,13 @@
import numpy.ma as ma
from pandas._libs import lib
+from pandas._libs.tslibs.period import Period
from pandas._typing import (
AnyArrayLike,
ArrayLike,
Dtype,
DtypeObj,
+ T,
)
from pandas.errors import IntCastingNaNError
from pandas.util._exceptions import find_stack_level
@@ -329,7 +334,8 @@ def array(
if dtype is None:
inferred_dtype = lib.infer_dtype(data, skipna=True)
if inferred_dtype == "period":
- return PeriodArray._from_sequence(data, copy=copy)
+ period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data)
+ return PeriodArray._from_sequence(period_data, copy=copy)
elif inferred_dtype == "interval":
return IntervalArray(data, copy=copy)
@@ -376,9 +382,23 @@ def array(
return PandasArray._from_sequence(data, dtype=dtype, copy=copy)
+@overload
def extract_array(
- obj: object, extract_numpy: bool = False, extract_range: bool = False
-) -> Any | ArrayLike:
+ obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ...
+) -> ArrayLike:
+ ...
+
+
+@overload
+def extract_array(
+ obj: T, extract_numpy: bool = ..., extract_range: bool = ...
+) -> T | ArrayLike:
+ ...
+
+
+def extract_array(
+ obj: T, extract_numpy: bool = False, extract_range: bool = False
+) -> T | ArrayLike:
"""
Extract the ndarray or ExtensionArray from a Series or Index.
@@ -425,12 +445,15 @@ def extract_array(
if isinstance(obj, ABCRangeIndex):
if extract_range:
return obj._values
- return obj
+ # https://github.com/python/mypy/issues/1081
+ # error: Incompatible return value type (got "RangeIndex", expected
+ # "Union[T, Union[ExtensionArray, ndarray[Any, Any]]]")
+ return obj # type: ignore[return-value]
- obj = obj._values
+ return obj._values
elif extract_numpy and isinstance(obj, ABCPandasArray):
- obj = obj.to_numpy()
+ return obj.to_numpy()
return obj
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 8451dcb6e412a..7a5db56cb48fe 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -913,12 +913,7 @@ def _list_of_series_to_arrays(
values = extract_array(s, extract_numpy=True)
aligned_values.append(algorithms.take_nd(values, indexer))
- # error: Argument 1 to "vstack" has incompatible type "List[ExtensionArray]";
- # expected "Sequence[Union[Union[int, float, complex, str, bytes, generic],
- # Sequence[Union[int, float, complex, str, bytes, generic]],
- # Sequence[Sequence[Any]], _SupportsArray]]"
- content = np.vstack(aligned_values) # type: ignore[arg-type]
-
+ content = np.vstack(aligned_values)
return content, columns
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index ef25224e5a847..3019aa1fc2dc7 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1641,9 +1641,7 @@ def _format_strings(self) -> list[str]:
formatter = self.formatter
if formatter is None:
- # error: Item "ndarray" of "Union[Any, Union[ExtensionArray, ndarray]]" has
- # no attribute "_formatter"
- formatter = values._formatter(boxed=True) # type: ignore[union-attr]
+ formatter = values._formatter(boxed=True)
if isinstance(values, Categorical):
# Categorical is special for now, so that we can preserve tzinfo
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index fc9671c2fc973..c20ce0c847b61 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -38,6 +38,7 @@
)
from pandas._libs.tslibs import timezones
from pandas._typing import (
+ AnyArrayLike,
ArrayLike,
DtypeArg,
Shape,
@@ -3042,7 +3043,7 @@ def write_array_empty(self, key: str, value: ArrayLike):
node._v_attrs.shape = value.shape
def write_array(
- self, key: str, obj: DataFrame | Series, items: Index | None = None
+ self, key: str, obj: AnyArrayLike, items: Index | None = None
) -> None:
# TODO: we only have a few tests that get here, the only EA
# that gets passed is DatetimeArray, and we never have
| xref https://github.com/pandas-dev/pandas/issues/37715
Narrowing the type bound allows resolving some ignored mypy errors.
The other modified code is needed because `extract_array` no longer returns `Any`, causing more strict type checking in the calling methods. | https://api.github.com/repos/pandas-dev/pandas/pulls/46942 | 2022-05-04T20:09:51Z | 2022-05-25T22:28:01Z | 2022-05-25T22:28:01Z | 2022-08-02T19:28:09Z |
TYP: enable reportUnusedImport | diff --git a/pandas/__init__.py b/pandas/__init__.py
index 01ff2e1e1f181..eb5ce71141f46 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -1,4 +1,3 @@
-# flake8: noqa
from __future__ import annotations
__docformat__ = "restructuredtext"
@@ -20,7 +19,7 @@
del _hard_dependencies, _dependency, _missing_dependencies
# numpy compat
-from pandas.compat import is_numpy_dev as _is_numpy_dev
+from pandas.compat import is_numpy_dev as _is_numpy_dev # pyright: ignore # noqa:F401
try:
from pandas._libs import hashtable as _hashtable, lib as _lib, tslib as _tslib
@@ -44,7 +43,7 @@
)
# let init-time option registration happen
-import pandas.core.config_init
+import pandas.core.config_init # pyright: ignore # noqa:F401
from pandas.core.api import (
# dtype
@@ -134,7 +133,8 @@
qcut,
)
-from pandas import api, arrays, errors, io, plotting, testing, tseries
+from pandas import api, arrays, errors, io, plotting, tseries
+from pandas import testing # noqa:PDF015
from pandas.util._print_versions import show_versions
from pandas.io.api import (
diff --git a/pandas/_config/__init__.py b/pandas/_config/__init__.py
index 65936a9fcdbf3..929f8a5af6b3f 100644
--- a/pandas/_config/__init__.py
+++ b/pandas/_config/__init__.py
@@ -16,7 +16,7 @@
"options",
]
from pandas._config import config
-from pandas._config import dates # noqa:F401
+from pandas._config import dates # pyright: ignore # noqa:F401
from pandas._config.config import (
describe_option,
get_option,
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 5e90eae27f981..1035fd08a1a36 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -19,7 +19,7 @@
import numpy as np
-from pandas._config.localization import ( # noqa:F401
+from pandas._config.localization import (
can_set_locale,
get_locales,
set_locale,
@@ -49,7 +49,7 @@
Series,
bdate_range,
)
-from pandas._testing._io import ( # noqa:F401
+from pandas._testing._io import (
close,
network,
round_trip_localpath,
@@ -57,16 +57,16 @@
round_trip_pickle,
write_to_compressed,
)
-from pandas._testing._random import ( # noqa:F401
+from pandas._testing._random import (
randbool,
rands,
rands_array,
)
-from pandas._testing._warnings import ( # noqa:F401
+from pandas._testing._warnings import (
assert_produces_warning,
maybe_produces_warning,
)
-from pandas._testing.asserters import ( # noqa:F401
+from pandas._testing.asserters import (
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
@@ -91,11 +91,11 @@
assert_timedelta_array_equal,
raise_assert_detail,
)
-from pandas._testing.compat import ( # noqa:F401
+from pandas._testing.compat import (
get_dtype,
get_obj,
)
-from pandas._testing.contexts import ( # noqa:F401
+from pandas._testing.contexts import (
RNGContext,
decompress_file,
ensure_clean,
@@ -1033,3 +1033,128 @@ def shares_memory(left, right) -> bool:
return shares_memory(arr, right)
raise NotImplementedError(type(left), type(right))
+
+
+__all__ = [
+ "ALL_INT_EA_DTYPES",
+ "ALL_INT_NUMPY_DTYPES",
+ "ALL_NUMPY_DTYPES",
+ "ALL_REAL_NUMPY_DTYPES",
+ "all_timeseries_index_generator",
+ "assert_almost_equal",
+ "assert_attr_equal",
+ "assert_categorical_equal",
+ "assert_class_equal",
+ "assert_contains_all",
+ "assert_copy",
+ "assert_datetime_array_equal",
+ "assert_dict_equal",
+ "assert_equal",
+ "assert_extension_array_equal",
+ "assert_frame_equal",
+ "assert_index_equal",
+ "assert_indexing_slices_equivalent",
+ "assert_interval_array_equal",
+ "assert_is_sorted",
+ "assert_is_valid_plot_return_object",
+ "assert_metadata_equivalent",
+ "assert_numpy_array_equal",
+ "assert_period_array_equal",
+ "assert_produces_warning",
+ "assert_series_equal",
+ "assert_sp_array_equal",
+ "assert_timedelta_array_equal",
+ "at",
+ "BOOL_DTYPES",
+ "box_expected",
+ "BYTES_DTYPES",
+ "can_set_locale",
+ "close",
+ "COMPLEX_DTYPES",
+ "convert_rows_list_to_csv_str",
+ "DATETIME64_DTYPES",
+ "decompress_file",
+ "EMPTY_STRING_PATTERN",
+ "ENDIAN",
+ "ensure_clean",
+ "ensure_clean_dir",
+ "ensure_safe_environment_variables",
+ "equalContents",
+ "external_error_raised",
+ "FLOAT_EA_DTYPES",
+ "FLOAT_NUMPY_DTYPES",
+ "getCols",
+ "get_cython_table_params",
+ "get_dtype",
+ "getitem",
+ "get_locales",
+ "getMixedTypeDict",
+ "get_obj",
+ "get_op_from_name",
+ "getPeriodData",
+ "getSeriesData",
+ "getTimeSeriesData",
+ "iat",
+ "iloc",
+ "index_subclass_makers_generator",
+ "loc",
+ "makeBoolIndex",
+ "makeCategoricalIndex",
+ "makeCustomDataframe",
+ "makeCustomIndex",
+ "makeDataFrame",
+ "makeDateIndex",
+ "makeFloatIndex",
+ "makeFloatSeries",
+ "makeIntervalIndex",
+ "makeIntIndex",
+ "makeMissingDataframe",
+ "makeMixedDataFrame",
+ "makeMultiIndex",
+ "makeNumericIndex",
+ "makeObjectSeries",
+ "makePeriodFrame",
+ "makePeriodIndex",
+ "makePeriodSeries",
+ "make_rand_series",
+ "makeRangeIndex",
+ "makeStringIndex",
+ "makeStringSeries",
+ "makeTimeDataFrame",
+ "makeTimedeltaIndex",
+ "makeTimeSeries",
+ "makeUIntIndex",
+ "maybe_produces_warning",
+ "NARROW_NP_DTYPES",
+ "network",
+ "NP_NAT_OBJECTS",
+ "NULL_OBJECTS",
+ "OBJECT_DTYPES",
+ "raise_assert_detail",
+ "randbool",
+ "rands",
+ "reset_display_options",
+ "reset_testing_mode",
+ "RNGContext",
+ "round_trip_localpath",
+ "round_trip_pathlib",
+ "round_trip_pickle",
+ "setitem",
+ "set_locale",
+ "set_testing_mode",
+ "set_timezone",
+ "shares_memory",
+ "SIGNED_INT_EA_DTYPES",
+ "SIGNED_INT_NUMPY_DTYPES",
+ "STRING_DTYPES",
+ "SubclassedCategorical",
+ "SubclassedDataFrame",
+ "SubclassedSeries",
+ "TIMEDELTA64_DTYPES",
+ "to_array",
+ "UNSIGNED_INT_EA_DTYPES",
+ "UNSIGNED_INT_NUMPY_DTYPES",
+ "use_numexpr",
+ "with_csv_dialect",
+ "write_to_compressed",
+]
diff --git a/pandas/api/__init__.py b/pandas/api/__init__.py
index 67fd722c9198b..22a09ed61d694 100644
--- a/pandas/api/__init__.py
+++ b/pandas/api/__init__.py
@@ -1,7 +1,14 @@
""" public toolkit API """
-from pandas.api import ( # noqa:F401
+from pandas.api import (
exchange,
extensions,
indexers,
types,
)
+
+__all__ = [
+ "exchange",
+ "extensions",
+ "indexers",
+ "types",
+]
diff --git a/pandas/conftest.py b/pandas/conftest.py
index eb17aac99a904..e176707d8a8f1 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1734,7 +1734,7 @@ def spmatrix(request):
params=[
getattr(pd.offsets, o)
for o in pd.offsets.__all__
- if issubclass(getattr(pd.offsets, o), pd.offsets.Tick)
+ if issubclass(getattr(pd.offsets, o), pd.offsets.Tick) and o != "Tick"
]
)
def tick_classes(request):
diff --git a/pandas/core/api.py b/pandas/core/api.py
index cf082d2013d3b..c2bedb032d479 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -1,5 +1,3 @@
-# flake8: noqa:F401
-
from pandas._libs import (
NaT,
Period,
@@ -84,3 +82,65 @@
# DataFrame needs to be imported after NamedAgg to avoid a circular import
from pandas.core.frame import DataFrame # isort:skip
+
+__all__ = [
+ "array",
+ "bdate_range",
+ "BooleanDtype",
+ "Categorical",
+ "CategoricalDtype",
+ "CategoricalIndex",
+ "DataFrame",
+ "DateOffset",
+ "date_range",
+ "DatetimeIndex",
+ "DatetimeTZDtype",
+ "factorize",
+ "Flags",
+ "Float32Dtype",
+ "Float64Dtype",
+ "Float64Index",
+ "Grouper",
+ "Index",
+ "IndexSlice",
+ "Int16Dtype",
+ "Int32Dtype",
+ "Int64Dtype",
+ "Int64Index",
+ "Int8Dtype",
+ "Interval",
+ "IntervalDtype",
+ "IntervalIndex",
+ "interval_range",
+ "isna",
+ "isnull",
+ "MultiIndex",
+ "NA",
+ "NamedAgg",
+ "NaT",
+ "notna",
+ "notnull",
+ "NumericIndex",
+ "Period",
+ "PeriodDtype",
+ "PeriodIndex",
+ "period_range",
+ "RangeIndex",
+ "Series",
+ "set_eng_float_format",
+ "StringDtype",
+ "Timedelta",
+ "TimedeltaIndex",
+ "timedelta_range",
+ "Timestamp",
+ "to_datetime",
+ "to_numeric",
+ "to_timedelta",
+ "UInt16Dtype",
+ "UInt32Dtype",
+ "UInt64Dtype",
+ "UInt64Index",
+ "UInt8Dtype",
+ "unique",
+ "value_counts",
+]
diff --git a/pandas/core/arrays/arrow/__init__.py b/pandas/core/arrays/arrow/__init__.py
index 6bdf29e38ac62..58b268cbdd221 100644
--- a/pandas/core/arrays/arrow/__init__.py
+++ b/pandas/core/arrays/arrow/__init__.py
@@ -1,3 +1,3 @@
-# flake8: noqa: F401
-
from pandas.core.arrays.arrow.array import ArrowExtensionArray
+
+__all__ = ["ArrowExtensionArray"]
diff --git a/pandas/core/arrays/sparse/__init__.py b/pandas/core/arrays/sparse/__init__.py
index 18294ead0329d..56dbc6df54fc9 100644
--- a/pandas/core/arrays/sparse/__init__.py
+++ b/pandas/core/arrays/sparse/__init__.py
@@ -1,5 +1,3 @@
-# flake8: noqa: F401
-
from pandas.core.arrays.sparse.accessor import (
SparseAccessor,
SparseFrameAccessor,
@@ -11,3 +9,13 @@
make_sparse_index,
)
from pandas.core.arrays.sparse.dtype import SparseDtype
+
+__all__ = [
+ "BlockIndex",
+ "IntIndex",
+ "make_sparse_index",
+ "SparseAccessor",
+ "SparseArray",
+ "SparseDtype",
+ "SparseFrameAccessor",
+]
diff --git a/pandas/core/dtypes/api.py b/pandas/core/dtypes/api.py
index bb6bfda183802..e6a59bf12d7cc 100644
--- a/pandas/core/dtypes/api.py
+++ b/pandas/core/dtypes/api.py
@@ -1,5 +1,3 @@
-# flake8: noqa:F401
-
from pandas.core.dtypes.common import (
is_array_like,
is_bool,
@@ -43,3 +41,47 @@
is_unsigned_integer_dtype,
pandas_dtype,
)
+
+__all__ = [
+ "is_array_like",
+ "is_bool",
+ "is_bool_dtype",
+ "is_categorical",
+ "is_categorical_dtype",
+ "is_complex",
+ "is_complex_dtype",
+ "is_datetime64_any_dtype",
+ "is_datetime64_dtype",
+ "is_datetime64_ns_dtype",
+ "is_datetime64tz_dtype",
+ "is_dict_like",
+ "is_dtype_equal",
+ "is_extension_array_dtype",
+ "is_extension_type",
+ "is_file_like",
+ "is_float",
+ "is_float_dtype",
+ "is_hashable",
+ "is_int64_dtype",
+ "is_integer",
+ "is_integer_dtype",
+ "is_interval",
+ "is_interval_dtype",
+ "is_iterator",
+ "is_list_like",
+ "is_named_tuple",
+ "is_number",
+ "is_numeric_dtype",
+ "is_object_dtype",
+ "is_period_dtype",
+ "is_re",
+ "is_re_compilable",
+ "is_scalar",
+ "is_signed_integer_dtype",
+ "is_sparse",
+ "is_string_dtype",
+ "is_timedelta64_dtype",
+ "is_timedelta64_ns_dtype",
+ "is_unsigned_integer_dtype",
+ "pandas_dtype",
+]
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 378f33e2b65ac..c10461b2fc7f8 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -36,7 +36,7 @@
ABCCategorical,
ABCIndex,
)
-from pandas.core.dtypes.inference import ( # noqa:F401
+from pandas.core.dtypes.inference import (
is_array_like,
is_bool,
is_complex,
@@ -1814,3 +1814,70 @@ def is_all_strings(value: ArrayLike) -> bool:
elif isinstance(dtype, CategoricalDtype):
return dtype.categories.inferred_type == "string"
return dtype == "string"
+
+
+__all__ = [
+ "classes",
+ "classes_and_not_datetimelike",
+ "DT64NS_DTYPE",
+ "ensure_float",
+ "ensure_float64",
+ "ensure_python_int",
+ "ensure_str",
+ "get_dtype",
+ "infer_dtype_from_object",
+ "INT64_DTYPE",
+ "is_1d_only_ea_dtype",
+ "is_1d_only_ea_obj",
+ "is_all_strings",
+ "is_any_int_dtype",
+ "is_array_like",
+ "is_bool",
+ "is_bool_dtype",
+ "is_categorical",
+ "is_categorical_dtype",
+ "is_complex",
+ "is_complex_dtype",
+ "is_dataclass",
+ "is_datetime64_any_dtype",
+ "is_datetime64_dtype",
+ "is_datetime64_ns_dtype",
+ "is_datetime64tz_dtype",
+ "is_datetimelike_v_numeric",
+ "is_datetime_or_timedelta_dtype",
+ "is_decimal",
+ "is_dict_like",
+ "is_dtype_equal",
+ "is_ea_or_datetimelike_dtype",
+ "is_extension_array_dtype",
+ "is_extension_type",
+ "is_file_like",
+ "is_float_dtype",
+ "is_int64_dtype",
+ "is_integer_dtype",
+ "is_interval",
+ "is_interval_dtype",
+ "is_iterator",
+ "is_named_tuple",
+ "is_nested_list_like",
+ "is_number",
+ "is_numeric_dtype",
+ "is_numeric_v_string_like",
+ "is_object_dtype",
+ "is_period_dtype",
+ "is_re",
+ "is_re_compilable",
+ "is_scipy_sparse",
+ "is_sequence",
+ "is_signed_integer_dtype",
+ "is_sparse",
+ "is_string_dtype",
+ "is_string_or_object_np_dtype",
+ "is_timedelta64_dtype",
+ "is_timedelta64_ns_dtype",
+ "is_unsigned_integer_dtype",
+ "needs_i8_conversion",
+ "pandas_dtype",
+ "TD64NS_DTYPE",
+ "validate_all_hashable",
+]
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 00ca6f9048a40..19e9c6b27e4e7 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1,3 +1,6 @@
+# pyright: reportUnusedImport = false
+from __future__ import annotations
+
import warnings
from pandas.util._exceptions import find_stack_level
@@ -30,3 +33,5 @@
FutureWarning,
stacklevel=find_stack_level(),
)
+
+__all__: list[str] = []
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py
index 75715bdc90003..ea69b567611e4 100644
--- a/pandas/core/internals/__init__.py
+++ b/pandas/core/internals/__init__.py
@@ -23,7 +23,6 @@
__all__ = [
"Block",
- "CategoricalBlock",
"NumericBlock",
"DatetimeTZBlock",
"ExtensionBlock",
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index 540a557f7c7cc..e9fefd9268870 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -11,7 +11,7 @@
import numpy as np
-from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op # noqa:F401
+from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
from pandas._typing import Level
from pandas.util._decorators import Appender
from pandas.util._exceptions import find_stack_level
@@ -30,7 +30,7 @@
algorithms,
roperator,
)
-from pandas.core.ops.array_ops import ( # noqa:F401
+from pandas.core.ops.array_ops import (
arithmetic_op,
comp_method_OBJECT_ARRAY,
comparison_op,
@@ -38,7 +38,7 @@
logical_op,
maybe_prepare_scalar_for_op,
)
-from pandas.core.ops.common import ( # noqa:F401
+from pandas.core.ops.common import (
get_op_result_name,
unpack_zerodim_and_defer,
)
@@ -47,14 +47,14 @@
_op_descriptions,
make_flex_doc,
)
-from pandas.core.ops.invalid import invalid_comparison # noqa:F401
-from pandas.core.ops.mask_ops import ( # noqa: F401
+from pandas.core.ops.invalid import invalid_comparison
+from pandas.core.ops.mask_ops import (
kleene_and,
kleene_or,
kleene_xor,
)
-from pandas.core.ops.methods import add_flex_arithmetic_methods # noqa:F401
-from pandas.core.roperator import ( # noqa:F401
+from pandas.core.ops.methods import add_flex_arithmetic_methods
+from pandas.core.roperator import (
radd,
rand_,
rdiv,
@@ -473,3 +473,40 @@ def f(self, other, axis=default_axis, level=None):
f.__name__ = op_name
return f
+
+
+__all__ = [
+ "add_flex_arithmetic_methods",
+ "align_method_FRAME",
+ "align_method_SERIES",
+ "ARITHMETIC_BINOPS",
+ "arithmetic_op",
+ "COMPARISON_BINOPS",
+ "comparison_op",
+ "comp_method_OBJECT_ARRAY",
+ "fill_binop",
+ "flex_arith_method_FRAME",
+ "flex_comp_method_FRAME",
+ "flex_method_SERIES",
+ "frame_arith_method_with_reindex",
+ "invalid_comparison",
+ "kleene_and",
+ "kleene_or",
+ "kleene_xor",
+ "logical_op",
+ "maybe_dispatch_ufunc_to_dunder_op",
+ "radd",
+ "rand_",
+ "rdiv",
+ "rdivmod",
+ "rfloordiv",
+ "rmod",
+ "rmul",
+ "ror_",
+ "rpow",
+ "rsub",
+ "rtruediv",
+ "rxor",
+ "should_reindex_frame_op",
+ "unpack_zerodim_and_defer",
+]
diff --git a/pandas/core/reshape/api.py b/pandas/core/reshape/api.py
index f100cca5c7615..b1884c497f0ad 100644
--- a/pandas/core/reshape/api.py
+++ b/pandas/core/reshape/api.py
@@ -1,5 +1,3 @@
-# flake8: noqa:F401
-
from pandas.core.reshape.concat import concat
from pandas.core.reshape.encoding import (
from_dummies,
@@ -24,3 +22,20 @@
cut,
qcut,
)
+
+__all__ = [
+ "concat",
+ "crosstab",
+ "cut",
+ "from_dummies",
+ "get_dummies",
+ "lreshape",
+ "melt",
+ "merge",
+ "merge_asof",
+ "merge_ordered",
+ "pivot",
+ "pivot_table",
+ "qcut",
+ "wide_to_long",
+]
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 7de34c04a31ed..1ec0e6ca83d8f 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -29,7 +29,7 @@
parsing,
timezones,
)
-from pandas._libs.tslibs.parsing import ( # noqa:F401
+from pandas._libs.tslibs.parsing import (
DateParseError,
format_is_iso,
guess_datetime_format,
@@ -1289,3 +1289,11 @@ def to_time(arg, format=None, infer_time_format=False, errors="raise"):
from pandas.core.tools.times import to_time
return to_time(arg, format, infer_time_format, errors)
+
+
+__all__ = [
+ "DateParseError",
+ "should_cache",
+ "to_datetime",
+ "to_time",
+]
diff --git a/pandas/core/window/__init__.py b/pandas/core/window/__init__.py
index 8f42cd782c67f..857e12e5467a6 100644
--- a/pandas/core/window/__init__.py
+++ b/pandas/core/window/__init__.py
@@ -1,13 +1,23 @@
-from pandas.core.window.ewm import ( # noqa:F401
+from pandas.core.window.ewm import (
ExponentialMovingWindow,
ExponentialMovingWindowGroupby,
)
-from pandas.core.window.expanding import ( # noqa:F401
+from pandas.core.window.expanding import (
Expanding,
ExpandingGroupby,
)
-from pandas.core.window.rolling import ( # noqa:F401
+from pandas.core.window.rolling import (
Rolling,
RollingGroupby,
Window,
)
+
+__all__ = [
+ "Expanding",
+ "ExpandingGroupby",
+ "ExponentialMovingWindow",
+ "ExponentialMovingWindowGroupby",
+ "Rolling",
+ "RollingGroupby",
+ "Window",
+]
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 6e47c46cc7203..93f07c5d75625 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -108,7 +108,6 @@
)
from pandas.core.generic import NDFrame
from pandas.core.groupby.ops import BaseGrouper
- from pandas.core.internals import Block # noqa:F401
class BaseWindow(SelectionMixin):
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 98a9d2b35f09d..47819ae5fad23 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -5,9 +5,9 @@
import ctypes
-from pandas._config.config import OptionError # noqa:F401
+from pandas._config.config import OptionError
-from pandas._libs.tslibs import ( # noqa:F401
+from pandas._libs.tslibs import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
)
@@ -413,3 +413,35 @@ class CSSWarning(UserWarning):
... .to_excel('styled.xlsx') # doctest: +SKIP
... # CSSWarning: Too many tokens provided to "border" (expected 1-3)
"""
+
+
+__all__ = [
+ "AbstractMethodError",
+ "AccessorRegistrationWarning",
+ "CSSWarning",
+ "DataError",
+ "DtypeWarning",
+ "DuplicateLabelError",
+ "EmptyDataError",
+ "IntCastingNaNError",
+ "InvalidIndexError",
+ "IndexingError",
+ "MergeError",
+ "NullFrequencyError",
+ "NumbaUtilError",
+ "NumExprClobberingError",
+ "OptionError",
+ "OutOfBoundsDatetime",
+ "OutOfBoundsTimedelta",
+ "ParserError",
+ "ParserWarning",
+ "PerformanceWarning",
+ "PyperclipException",
+ "PyperclipWindowsException",
+ "SettingWithCopyError",
+ "SettingWithCopyWarning",
+ "SpecificationError",
+ "UndefinedVariableError",
+ "UnsortedIndexError",
+ "UnsupportedFunctionCall",
+]
diff --git a/pandas/io/api.py b/pandas/io/api.py
index 5926f2166ee9d..4e8b34a61dfc6 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -2,8 +2,6 @@
Data IO api
"""
-# flake8: noqa
-
from pandas.io.clipboards import read_clipboard
from pandas.io.excel import (
ExcelFile,
@@ -38,3 +36,30 @@
)
from pandas.io.stata import read_stata
from pandas.io.xml import read_xml
+
+__all__ = [
+ "ExcelFile",
+ "ExcelWriter",
+ "HDFStore",
+ "read_clipboard",
+ "read_csv",
+ "read_excel",
+ "read_feather",
+ "read_fwf",
+ "read_gbq",
+ "read_hdf",
+ "read_html",
+ "read_json",
+ "read_orc",
+ "read_parquet",
+ "read_pickle",
+ "read_sas",
+ "read_spss",
+ "read_sql",
+ "read_sql_query",
+ "read_sql_table",
+ "read_stata",
+ "read_table",
+ "read_xml",
+ "to_pickle",
+]
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index d28309cda6788..ed0e0a99ec43b 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -151,7 +151,7 @@ def __init__(self) -> None:
import pyarrow.parquet
# import utils to register the pyarrow extension types
- import pandas.core.arrays.arrow._arrow_utils # noqa:F401
+ import pandas.core.arrays.arrow._arrow_utils # pyright: ignore # noqa:F401
self.api = pyarrow
diff --git a/pandas/io/sas/__init__.py b/pandas/io/sas/__init__.py
index 71027fd064f3d..317730745b6e3 100644
--- a/pandas/io/sas/__init__.py
+++ b/pandas/io/sas/__init__.py
@@ -1 +1,3 @@
-from pandas.io.sas.sasreader import read_sas # noqa:F401
+from pandas.io.sas.sasreader import read_sas
+
+__all__ = ["read_sas"]
diff --git a/pandas/tests/tseries/offsets/conftest.py b/pandas/tests/tseries/offsets/conftest.py
index df68c98dca43f..72f5c4a519a3a 100644
--- a/pandas/tests/tseries/offsets/conftest.py
+++ b/pandas/tests/tseries/offsets/conftest.py
@@ -5,7 +5,11 @@
import pandas.tseries.offsets as offsets
-@pytest.fixture(params=[getattr(offsets, o) for o in offsets.__all__])
+@pytest.fixture(
+ params=[
+ getattr(offsets, o) for o in offsets.__all__ if o not in ("Tick", "BaseOffset")
+ ]
+)
def offset_types(request):
"""
Fixture for all the datetime offsets available for a time series.
diff --git a/pandas/tseries/api.py b/pandas/tseries/api.py
index 59666fa0048dd..e274838d45b27 100644
--- a/pandas/tseries/api.py
+++ b/pandas/tseries/api.py
@@ -2,7 +2,7 @@
Timeseries API
"""
-# flake8: noqa:F401
-
from pandas.tseries.frequencies import infer_freq
import pandas.tseries.offsets as offsets
+
+__all__ = ["infer_freq", "offsets"]
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index a4fe2161983b6..b2fbc022b2708 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -22,7 +22,7 @@
build_field_sarray,
month_position_check,
)
-from pandas._libs.tslibs.offsets import ( # noqa:F401
+from pandas._libs.tslibs.offsets import (
BaseOffset,
DateOffset,
Day,
@@ -647,3 +647,14 @@ def _is_monthly(rule: str) -> bool:
def _is_weekly(rule: str) -> bool:
rule = rule.upper()
return rule == "W" or rule.startswith("W-")
+
+
+__all__ = [
+ "Day",
+ "get_offset",
+ "get_period_alias",
+ "infer_freq",
+ "is_subperiod",
+ "is_superperiod",
+ "to_offset",
+]
diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index 6fd49e2340e30..6426dbcd54489 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -6,7 +6,7 @@
)
import warnings
-from dateutil.relativedelta import ( # noqa:F401
+from dateutil.relativedelta import (
FR,
MO,
SA,
@@ -582,3 +582,27 @@ def HolidayCalendarFactory(name, base, other, base_class=AbstractHolidayCalendar
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
+
+
+__all__ = [
+ "after_nearest_workday",
+ "before_nearest_workday",
+ "FR",
+ "get_calendar",
+ "HolidayCalendarFactory",
+ "MO",
+ "nearest_workday",
+ "next_monday",
+ "next_monday_or_tuesday",
+ "next_workday",
+ "previous_friday",
+ "previous_workday",
+ "register",
+ "SA",
+ "SU",
+ "sunday_to_monday",
+ "TH",
+ "TU",
+ "WE",
+ "weekend_to_monday",
+]
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index cee99d23f8d90..b995c6ac78b80 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -1,4 +1,4 @@
-from pandas._libs.tslibs.offsets import ( # noqa:F401
+from pandas._libs.tslibs.offsets import (
FY5253,
BaseOffset,
BDay,
@@ -45,9 +45,14 @@
__all__ = [
"Day",
+ "BaseOffset",
"BusinessDay",
+ "BusinessMonthBegin",
+ "BusinessMonthEnd",
"BDay",
"CustomBusinessDay",
+ "CustomBusinessMonthBegin",
+ "CustomBusinessMonthEnd",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
@@ -73,6 +78,7 @@
"Week",
"WeekOfMonth",
"Easter",
+ "Tick",
"Hour",
"Minute",
"Second",
diff --git a/pandas/util/__init__.py b/pandas/util/__init__.py
index 7adfca73c2f1e..6e6006dd28165 100644
--- a/pandas/util/__init__.py
+++ b/pandas/util/__init__.py
@@ -1,3 +1,4 @@
+# pyright: reportUnusedImport = false
from pandas.util._decorators import ( # noqa:F401
Appender,
Substitution,
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 978f2982e6d18..cec4ee40a8c7a 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -11,7 +11,7 @@
)
import warnings
-from pandas._libs.properties import cache_readonly # noqa:F401
+from pandas._libs.properties import cache_readonly
from pandas._typing import F
from pandas.util._exceptions import find_stack_level
@@ -498,3 +498,16 @@ def indent(text: str | None, indents: int = 1) -> str:
return ""
jointext = "".join(["\n"] + [" "] * indents)
return jointext.join(text.split("\n"))
+
+
+__all__ = [
+ "Appender",
+ "cache_readonly",
+ "deprecate",
+ "deprecate_kwarg",
+ "deprecate_nonkeyword_arguments",
+ "doc",
+ "future_version_msg",
+ "rewrite_axis_style_signature",
+ "Substitution",
+]
diff --git a/pyproject.toml b/pyproject.toml
index 0e2e41fba461c..6ca37581b03f0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -166,6 +166,7 @@ reportPropertyTypeMismatch = true
reportUntypedClassDecorator = true
reportUntypedFunctionDecorator = true
reportUntypedNamedTuple = true
+reportUnusedImport = true
# disable subset of "basic"
reportGeneralTypeIssues = false
reportMissingModuleSource = false
@@ -176,4 +177,3 @@ reportOptionalOperand = false
reportOptionalSubscript = false
reportPrivateImportUsage = false
reportUnboundVariable = false
-reportUnsupportedDunderAll = false
| pyright's reportUnusedImport checks py and pyi files for unused imports. If an import is explicitly marked as public (re-exported using `as` or in `__all__`) it is "used" (flake8 seems to use the same definition).
Adding unused imports to `__all__` becomes messy when the file does not yet have `__all__`: need to list also all public symbols (constants, classes, functions). | https://api.github.com/repos/pandas-dev/pandas/pulls/46937 | 2022-05-04T03:34:35Z | 2022-07-10T00:11:59Z | 2022-07-10T00:11:59Z | 2022-09-21T15:28:31Z |
ENH: support mask in libalgos.rank | diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi
index 60bdb504c545b..0cc9209fbdfc5 100644
--- a/pandas/_libs/algos.pyi
+++ b/pandas/_libs/algos.pyi
@@ -109,6 +109,7 @@ def rank_1d(
ascending: bool = ...,
pct: bool = ...,
na_option=...,
+ mask: npt.NDArray[np.bool_] | None = ...,
) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1]
def rank_2d(
in_arr: np.ndarray, # ndarray[numeric_object_t, ndim=2]
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 6c28b4f821080..d33eba06988e9 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -889,6 +889,7 @@ def rank_1d(
bint ascending=True,
bint pct=False,
na_option="keep",
+ const uint8_t[:] mask=None,
):
"""
Fast NaN-friendly version of ``scipy.stats.rankdata``.
@@ -918,6 +919,8 @@ def rank_1d(
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
+ mask : np.ndarray[bool], optional, default None
+ Specify locations to be treated as NA, for e.g. Categorical.
"""
cdef:
TiebreakEnumType tiebreak
@@ -927,7 +930,6 @@ def rank_1d(
float64_t[::1] out
ndarray[numeric_object_t, ndim=1] masked_vals
numeric_object_t[:] masked_vals_memview
- uint8_t[:] mask
bint keep_na, nans_rank_highest, check_labels, check_mask
numeric_object_t nan_fill_val
@@ -956,6 +958,7 @@ def rank_1d(
or numeric_object_t is object
or (numeric_object_t is int64_t and is_datetimelike)
)
+ check_mask = check_mask or mask is not None
# Copy values into new array in order to fill missing data
# with mask, without obfuscating location of missing data
@@ -965,7 +968,9 @@ def rank_1d(
else:
masked_vals = values.copy()
- if numeric_object_t is object:
+ if mask is not None:
+ pass
+ elif numeric_object_t is object:
mask = missing.isnaobj(masked_vals)
elif numeric_object_t is int64_t and is_datetimelike:
mask = (masked_vals == NPY_NAT).astype(np.uint8)
diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi
index 197a8bdc0cd7c..2f0c3980c0c02 100644
--- a/pandas/_libs/groupby.pyi
+++ b/pandas/_libs/groupby.pyi
@@ -128,6 +128,7 @@ def group_rank(
ascending: bool = ...,
pct: bool = ...,
na_option: Literal["keep", "top", "bottom"] = ...,
+ mask: npt.NDArray[np.bool_] | None = ...,
) -> None: ...
def group_max(
out: np.ndarray, # groupby_t[:, ::1]
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index 9bc89eef089cd..03f318d08d8cb 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -1262,6 +1262,7 @@ def group_rank(
bint ascending=True,
bint pct=False,
str na_option="keep",
+ const uint8_t[:, :] mask=None,
) -> None:
"""
Provides the rank of values within each group.
@@ -1294,6 +1295,7 @@ def group_rank(
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
+ mask : np.ndarray[bool] or None, default None
Notes
-----
@@ -1302,10 +1304,16 @@ def group_rank(
cdef:
Py_ssize_t i, k, N
ndarray[float64_t, ndim=1] result
+ const uint8_t[:] sub_mask
N = values.shape[1]
for k in range(N):
+ if mask is None:
+ sub_mask = None
+ else:
+ sub_mask = mask[:, k]
+
result = rank_1d(
values=values[:, k],
labels=labels,
@@ -1313,7 +1321,8 @@ def group_rank(
ties_method=ties_method,
ascending=ascending,
pct=pct,
- na_option=na_option
+ na_option=na_option,
+ mask=sub_mask,
)
for i in range(len(result)):
# TODO: why can't we do out[:, k] = result?
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 09954bd6be4e4..a769c92e0b542 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -46,7 +46,6 @@
ensure_platform_int,
is_1d_only_ea_dtype,
is_bool_dtype,
- is_categorical_dtype,
is_complex_dtype,
is_datetime64_any_dtype,
is_float_dtype,
@@ -56,12 +55,14 @@
is_timedelta64_dtype,
needs_i8_conversion,
)
+from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import (
isna,
maybe_fill,
)
from pandas.core.arrays import (
+ Categorical,
DatetimeArray,
ExtensionArray,
PeriodArray,
@@ -142,7 +143,15 @@ def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None:
# "group_any" and "group_all" are also support masks, but don't go
# through WrappedCythonOp
- _MASKED_CYTHON_FUNCTIONS = {"cummin", "cummax", "min", "max", "last", "first"}
+ _MASKED_CYTHON_FUNCTIONS = {
+ "cummin",
+ "cummax",
+ "min",
+ "max",
+ "last",
+ "first",
+ "rank",
+ }
_cython_arity = {"ohlc": 4} # OHLC
@@ -229,12 +238,17 @@ def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
# never an invalid op for those dtypes, so return early as fastpath
return
- if is_categorical_dtype(dtype):
+ if isinstance(dtype, CategoricalDtype):
# NotImplementedError for methods that can fall back to a
# non-cython implementation.
if how in ["add", "prod", "cumsum", "cumprod"]:
raise TypeError(f"{dtype} type does not support {how} operations")
- raise NotImplementedError(f"{dtype} dtype not supported")
+ elif how not in ["rank"]:
+ # only "rank" is implemented in cython
+ raise NotImplementedError(f"{dtype} dtype not supported")
+ elif not dtype.ordered:
+ # TODO: TypeError?
+ raise NotImplementedError(f"{dtype} dtype not supported")
elif is_sparse(dtype):
# categoricals are only 1d, so we
@@ -332,6 +346,25 @@ def _ea_wrap_cython_operation(
**kwargs,
)
+ elif isinstance(values, Categorical) and self.uses_mask():
+ assert self.how == "rank" # the only one implemented ATM
+ assert values.ordered # checked earlier
+ mask = values.isna()
+ npvalues = values._ndarray
+
+ res_values = self._cython_op_ndim_compat(
+ npvalues,
+ min_count=min_count,
+ ngroups=ngroups,
+ comp_ids=comp_ids,
+ mask=mask,
+ **kwargs,
+ )
+
+ # If we ever have more than just "rank" here, we'll need to do
+ # `if self.how in self.cast_blocklist` like we do for other dtypes.
+ return res_values
+
npvalues = self._ea_to_cython_values(values)
res_values = self._cython_op_ndim_compat(
@@ -551,6 +584,9 @@ def _call_cython_op(
else:
# TODO: min_count
if self.uses_mask():
+ if self.how != "rank":
+ # TODO: should rank take result_mask?
+ kwargs["result_mask"] = result_mask
func(
out=result,
values=values,
@@ -558,7 +594,6 @@ def _call_cython_op(
ngroups=ngroups,
is_datetimelike=is_datetimelike,
mask=mask,
- result_mask=result_mask,
**kwargs,
)
else:
diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py
index 7830c229ece2f..8bbe38d3379ac 100644
--- a/pandas/tests/groupby/test_rank.py
+++ b/pandas/tests/groupby/test_rank.py
@@ -458,6 +458,8 @@ def test_rank_avg_even_vals(dtype, upper):
result = df.groupby("key").rank()
exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=["val"])
+ if upper:
+ exp_df = exp_df.astype("Float64")
tm.assert_frame_equal(result, exp_df)
@@ -663,3 +665,17 @@ def test_non_unique_index():
name="value",
)
tm.assert_series_equal(result, expected)
+
+
+def test_rank_categorical():
+ cat = pd.Categorical(["a", "a", "b", np.nan, "c", "b"], ordered=True)
+ cat2 = pd.Categorical([1, 2, 3, np.nan, 4, 5], ordered=True)
+
+ df = DataFrame({"col1": [0, 1, 0, 1, 0, 1], "col2": cat, "col3": cat2})
+
+ gb = df.groupby("col1")
+
+ res = gb.rank()
+
+ expected = df.astype(object).groupby("col1").rank()
+ tm.assert_frame_equal(res, expected)
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46932 | 2022-05-02T22:35:01Z | 2022-05-04T13:07:42Z | 2022-05-04T13:07:42Z | 2022-05-04T20:14:26Z |
DOC: added index, dropna description for HDF methods #45030 | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c615216240d60..13af64c9fea5d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2599,6 +2599,8 @@ def to_hdf(
like searching / selecting subsets of the data.
- If None, pd.get_option('io.hdf.default_format') is checked,
followed by fallback to "fixed".
+ index : bool, default True
+ Write DataFrame index as a column.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
@@ -2609,6 +2611,8 @@ def to_hdf(
nan_rep : Any, optional
How to represent null values as str.
Not allowed with append=True.
+ dropna : bool, default False, optional
+ Remove missing values.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 505f5a74f06e6..fc9671c2fc973 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1112,6 +1112,8 @@ def put(
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
+ index : bool, default True
+ Write DataFrame index as a column.
append : bool, default False
This will force Table format, append the input data to the existing.
data_columns : list of columns or True, default None
@@ -1124,6 +1126,8 @@ def put(
Parameter is propagated to 'create_table' method of 'PyTables'.
If set to False it enables to have the same h5 files (same hashes)
independent on creation time.
+ dropna : bool, default False, optional
+ Remove missing values.
.. versionadded:: 1.1.0
"""
@@ -1239,6 +1243,8 @@ def append(
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
+ index : bool, default True
+ Write DataFrame index as a column.
append : bool, default True
Append the input data to the existing.
data_columns : list of columns, or True, default None
@@ -1251,7 +1257,7 @@ def append(
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for str
- dropna : bool, default False
+ dropna : bool, default False, optional
Do not write an ALL nan row to the store settable
by the option 'io.hdf.dropna_table'.
| - [ ] closes #45030
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46931 | 2022-05-02T22:20:01Z | 2022-05-04T23:38:22Z | 2022-05-04T23:38:22Z | 2022-05-05T08:15:34Z |
DataFrame.replace with dict doesn't work when value=None | diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index bece833066f89..890b988378870 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -482,8 +482,8 @@
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and
- 'y' with 'z'. To use a dict in this way the `value`
- parameter should be `None`.
+ 'y' with 'z'. To use a dict in this way, the optional `value`
+ parameter should not be given.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a'
@@ -494,8 +494,8 @@
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column
- 'a' for the value 'b' and replace it with NaN. The `value`
- parameter should be ``None`` to use a nested dict in this
+ 'a' for the value 'b' and replace it with NaN. The optional `value`
+ parameter should not be specified to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
| - [x] closes #46606 (Replace xxxx with the Github issue number) | https://api.github.com/repos/pandas-dev/pandas/pulls/46930 | 2022-05-02T22:16:55Z | 2022-05-03T01:36:54Z | 2022-05-03T01:36:53Z | 2022-05-03T01:37:00Z |
TYP: overload maybe_downcast_numeric and maybe_downcast_to_dtype | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index cba055d5b4345..88a92ea1455d0 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -246,6 +246,16 @@ def _disallow_mismatched_datetimelike(value, dtype: DtypeObj):
raise TypeError(f"Cannot cast {repr(value)} to {dtype}")
+@overload
+def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray:
+ ...
+
+
+@overload
+def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike:
+ ...
+
+
def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike:
"""
try to cast to the specified dtype (e.g. convert back to bool/int
@@ -301,6 +311,20 @@ def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLi
return result
+@overload
+def maybe_downcast_numeric(
+ result: np.ndarray, dtype: np.dtype, do_round: bool = False
+) -> np.ndarray:
+ ...
+
+
+@overload
+def maybe_downcast_numeric(
+ result: ExtensionArray, dtype: DtypeObj, do_round: bool = False
+) -> ArrayLike:
+ ...
+
+
def maybe_downcast_numeric(
result: ArrayLike, dtype: DtypeObj, do_round: bool = False
) -> ArrayLike:
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index a769c92e0b542..599f384a89a68 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -637,9 +637,7 @@ def _call_cython_op(
else:
op_result = result
- # error: Incompatible return value type (got "Union[ExtensionArray, ndarray]",
- # expected "ndarray")
- return op_result # type: ignore[return-value]
+ return op_result
@final
def cython_operation(
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index f6a20a418c32b..c3acfc5ff2f66 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1141,11 +1141,7 @@ def interval_range(
if all(is_integer(x) for x in com.not_none(start, end, freq)):
# np.linspace always produces float output
- # error: Incompatible types in assignment (expression has type
- # "Union[ExtensionArray, ndarray]", variable has type "ndarray")
- breaks = maybe_downcast_numeric( # type: ignore[assignment]
- breaks, np.dtype("int64")
- )
+ breaks = maybe_downcast_numeric(breaks, np.dtype("int64"))
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
| xref https://github.com/pandas-dev/pandas/issues/37715
Overload type definitions for these methods so that passing in an ndarray returns an ndarray.
This allows removing two more ignored mypy errors. | https://api.github.com/repos/pandas-dev/pandas/pulls/46929 | 2022-05-02T21:28:09Z | 2022-05-06T14:23:01Z | 2022-05-06T14:23:01Z | 2022-05-06T14:23:27Z |
TYP: resolve ignored mypy errors in core/describe.py | diff --git a/pandas/core/describe.py b/pandas/core/describe.py
index 60881d7a68b10..c70dbe0b8b0b1 100644
--- a/pandas/core/describe.py
+++ b/pandas/core/describe.py
@@ -22,7 +22,10 @@
import numpy as np
from pandas._libs.tslibs import Timestamp
-from pandas._typing import NDFrameT
+from pandas._typing import (
+ NDFrameT,
+ npt,
+)
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_percentile
@@ -186,11 +189,9 @@ def _select_data(self):
"""Select columns to be described."""
if (self.include is None) and (self.exclude is None):
# when some numerics are found, keep only numerics
- default_include = [np.number]
+ default_include: list[npt.DTypeLike] = [np.number]
if self.datetime_is_numeric:
- # error: Argument 1 to "append" of "list" has incompatible type "str";
- # expected "Type[number[Any]]"
- default_include.append("datetime") # type: ignore[arg-type]
+ default_include.append("datetime")
data = self.obj.select_dtypes(include=default_include)
if len(data.columns) == 0:
data = self.obj
@@ -230,10 +231,7 @@ def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series:
"""
from pandas import Series
- # error: Argument 1 to "format_percentiles" has incompatible type "Sequence[float]";
- # expected "Union[ndarray, List[Union[int, float]], List[float], List[Union[str,
- # float]]]"
- formatted_percentiles = format_percentiles(percentiles) # type: ignore[arg-type]
+ formatted_percentiles = format_percentiles(percentiles)
stat_index = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
d = (
@@ -337,10 +335,7 @@ def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series:
# GH-30164
from pandas import Series
- # error: Argument 1 to "format_percentiles" has incompatible type "Sequence[float]";
- # expected "Union[ndarray, List[Union[int, float]], List[float], List[Union[str,
- # float]]]"
- formatted_percentiles = format_percentiles(percentiles) # type: ignore[arg-type]
+ formatted_percentiles = format_percentiles(percentiles)
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index fd79d418658ea..ef25224e5a847 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1667,7 +1667,7 @@ def _format_strings(self) -> list[str]:
def format_percentiles(
- percentiles: (np.ndarray | list[int | float] | list[float] | list[str | float]),
+ percentiles: (np.ndarray | Sequence[float]),
) -> list[str]:
"""
Outputs rounded and formatted percentiles.
| xref https://github.com/pandas-dev/pandas/issues/37715
| https://api.github.com/repos/pandas-dev/pandas/pulls/46928 | 2022-05-02T20:26:52Z | 2022-05-06T21:24:31Z | 2022-05-06T21:24:31Z | 2022-05-06T21:24:36Z |
CLN: tzconversion | diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd
index a34161b20e2ff..600ac54639dfc 100644
--- a/pandas/_libs/tslibs/tzconversion.pxd
+++ b/pandas/_libs/tslibs/tzconversion.pxd
@@ -6,9 +6,6 @@ from numpy cimport (
)
-cdef int64_t localize_tzinfo_api(
- int64_t utc_val, tzinfo tz, bint* fold=*
-) except? -1
cdef int64_t tz_convert_from_utc_single(
int64_t utc_val, tzinfo tz, bint* fold=?, Py_ssize_t* outpos=?
) except? -1
@@ -16,15 +13,6 @@ cdef int64_t tz_localize_to_utc_single(
int64_t val, tzinfo tz, object ambiguous=*, object nonexistent=*
) except? -1
-cdef Py_ssize_t bisect_right_i8(int64_t *data, int64_t val, Py_ssize_t n)
-
-cdef bint infer_dateutil_fold(
- int64_t value,
- const int64_t[::1] trans,
- const int64_t[::1] deltas,
- Py_ssize_t pos,
-)
-
cdef class Localizer:
cdef:
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index 8d307e324ba4e..fede9768f5fee 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -50,7 +50,6 @@ cdef const int64_t[::1] _deltas_placeholder = np.array([], dtype=np.int64)
@cython.freelist(16)
-#@cython.internal
@cython.final
cdef class Localizer:
# cdef:
@@ -102,13 +101,15 @@ cdef class Localizer:
if self.use_utc:
return utc_val
elif self.use_tzlocal:
- return utc_val + localize_tzinfo_api(utc_val, self.tz, fold)
+ return utc_val + _tz_localize_using_tzinfo_api(
+ utc_val, self.tz, to_utc=False, fold=fold
+ )
elif self.use_fixed:
return utc_val + self.delta
else:
pos[0] = bisect_right_i8(self.tdata, utc_val, self.ntrans) - 1
if fold is not NULL:
- fold[0] = infer_dateutil_fold(
+ fold[0] = _infer_dateutil_fold(
utc_val, self.trans, self.deltas, pos[0]
)
@@ -184,10 +185,10 @@ timedelta-like}
cdef:
const int64_t[::1] deltas
ndarray[uint8_t, cast=True] ambiguous_array
- Py_ssize_t i, isl, isr, idx, pos, ntrans, n = vals.shape[0]
+ Py_ssize_t i, idx, pos, ntrans, n = vals.shape[0]
Py_ssize_t delta_idx_offset, delta_idx, pos_left, pos_right
int64_t *tdata
- int64_t v, left, right, val, v_left, v_right, new_local, remaining_mins
+ int64_t v, left, right, val, new_local, remaining_mins
int64_t first_delta, delta
int64_t shift_delta = 0
ndarray[int64_t] trans, result_a, result_b, dst_hours
@@ -202,7 +203,7 @@ timedelta-like}
if is_utc(tz) or tz is None:
return vals.copy()
- result = np.empty(n, dtype=np.int64)
+ result = cnp.PyArray_EMPTY(vals.ndim, vals.shape, cnp.NPY_INT64, 0)
if is_tzlocal(tz) or is_zoneinfo(tz):
for i in range(n):
@@ -265,40 +266,7 @@ timedelta-like}
# Determine whether each date lies left of the DST transition (store in
# result_a) or right of the DST transition (store in result_b)
- result_a = np.empty(n, dtype=np.int64)
- result_b = np.empty(n, dtype=np.int64)
-
- for i in range(n):
- # This loops resembles the "Find the two best possibilities" block
- # in pytz's DstTZInfo.localize method.
- result_a[i] = NPY_NAT
- result_b[i] = NPY_NAT
-
- val = vals[i]
- if val == NPY_NAT:
- continue
-
- # TODO: be careful of overflow in val-DAY_NANOS
- isl = bisect_right_i8(tdata, val - DAY_NANOS, ntrans) - 1
- if isl < 0:
- isl = 0
-
- v_left = val - deltas[isl]
- pos_left = bisect_right_i8(tdata, v_left, ntrans) - 1
- # timestamp falls to the left side of the DST transition
- if v_left + deltas[pos_left] == val:
- result_a[i] = v_left
-
- # TODO: be careful of overflow in val+DAY_NANOS
- isr = bisect_right_i8(tdata, val + DAY_NANOS, ntrans) - 1
- if isr < 0:
- isr = 0
-
- v_right = val - deltas[isr]
- pos_right = bisect_right_i8(tdata, v_right, ntrans) - 1
- # timestamp falls to the right side of the DST transition
- if v_right + deltas[pos_right] == val:
- result_b[i] = v_right
+ result_a, result_b =_get_utc_bounds(vals, tdata, ntrans, deltas)
# silence false-positive compiler warning
dst_hours = np.empty(0, dtype=np.int64)
@@ -417,6 +385,59 @@ cdef inline str _render_tstamp(int64_t val):
return str(Timestamp(val))
+cdef _get_utc_bounds(
+ ndarray vals,
+ int64_t* tdata,
+ Py_ssize_t ntrans,
+ const int64_t[::1] deltas,
+):
+ # Determine whether each date lies left of the DST transition (store in
+ # result_a) or right of the DST transition (store in result_b)
+
+ cdef:
+ ndarray result_a, result_b
+ Py_ssize_t i, n = vals.size
+ int64_t val, v_left, v_right
+ Py_ssize_t isl, isr, pos_left, pos_right
+
+ result_a = cnp.PyArray_EMPTY(vals.ndim, vals.shape, cnp.NPY_INT64, 0)
+ result_b = cnp.PyArray_EMPTY(vals.ndim, vals.shape, cnp.NPY_INT64, 0)
+
+ for i in range(n):
+ # This loops resembles the "Find the two best possibilities" block
+ # in pytz's DstTZInfo.localize method.
+ result_a[i] = NPY_NAT
+ result_b[i] = NPY_NAT
+
+ val = vals[i]
+ if val == NPY_NAT:
+ continue
+
+ # TODO: be careful of overflow in val-DAY_NANOS
+ isl = bisect_right_i8(tdata, val - DAY_NANOS, ntrans) - 1
+ if isl < 0:
+ isl = 0
+
+ v_left = val - deltas[isl]
+ pos_left = bisect_right_i8(tdata, v_left, ntrans) - 1
+ # timestamp falls to the left side of the DST transition
+ if v_left + deltas[pos_left] == val:
+ result_a[i] = v_left
+
+ # TODO: be careful of overflow in val+DAY_NANOS
+ isr = bisect_right_i8(tdata, val + DAY_NANOS, ntrans) - 1
+ if isr < 0:
+ isr = 0
+
+ v_right = val - deltas[isr]
+ pos_right = bisect_right_i8(tdata, v_right, ntrans) - 1
+ # timestamp falls to the right side of the DST transition
+ if v_right + deltas[pos_right] == val:
+ result_b[i] = v_right
+
+ return result_a, result_b
+
+
@cython.boundscheck(False)
cdef ndarray[int64_t] _get_dst_hours(
# vals only needed here to potential render an exception message
@@ -433,10 +454,10 @@ cdef ndarray[int64_t] _get_dst_hours(
intp_t switch_idx
int64_t left, right
- dst_hours = np.empty(n, dtype=np.int64)
+ dst_hours = cnp.PyArray_EMPTY(result_a.ndim, result_a.shape, cnp.NPY_INT64, 0)
dst_hours[:] = NPY_NAT
- mismatch = np.zeros(n, dtype=bool)
+ mismatch = cnp.PyArray_ZEROS(result_a.ndim, result_a.shape, cnp.NPY_BOOL, 0)
for i in range(n):
left = result_a[i]
@@ -450,6 +471,7 @@ cdef ndarray[int64_t] _get_dst_hours(
trans_idx = mismatch.nonzero()[0]
if trans_idx.size == 1:
+ # TODO: not reached in tests 2022-05-02; possible?
stamp = _render_tstamp(vals[trans_idx[0]])
raise pytz.AmbiguousTimeError(
f"Cannot infer dst time from {stamp} as there "
@@ -471,6 +493,7 @@ cdef ndarray[int64_t] _get_dst_hours(
delta = np.diff(result_a[grp])
if grp.size == 1 or np.all(delta > 0):
+ # TODO: not reached in tests 2022-05-02; possible?
stamp = _render_tstamp(vals[grp[0]])
raise pytz.AmbiguousTimeError(stamp)
@@ -478,6 +501,7 @@ cdef ndarray[int64_t] _get_dst_hours(
# for standard
switch_idxs = (delta <= 0).nonzero()[0]
if switch_idxs.size > 1:
+ # TODO: not reached in tests 2022-05-02; possible?
raise pytz.AmbiguousTimeError(
f"There are {switch_idxs.size} dst switches when "
"there should only be 1."
@@ -495,15 +519,6 @@ cdef ndarray[int64_t] _get_dst_hours(
# ----------------------------------------------------------------------
# Timezone Conversion
-cdef int64_t localize_tzinfo_api(
- int64_t utc_val, tzinfo tz, bint* fold=NULL
-) except? -1:
- """
- See _tz_localize_using_tzinfo_api.__doc__
- """
- return _tz_localize_using_tzinfo_api(utc_val, tz, to_utc=False, fold=fold)
-
-
def py_tz_convert_from_utc_single(int64_t utc_val, tzinfo tz):
# The 'bint* fold=NULL' in tz_convert_from_utc_single means we cannot
# make it cdef, so this is version exposed for testing from python.
@@ -608,7 +623,7 @@ cdef int64_t _tz_localize_using_tzinfo_api(
# NB: relies on dateutil internals, subject to change.
@cython.boundscheck(False)
@cython.wraparound(False)
-cdef bint infer_dateutil_fold(
+cdef bint _infer_dateutil_fold(
int64_t value,
const int64_t[::1] trans,
const int64_t[::1] deltas,
| Cleanups following recent refactoring. | https://api.github.com/repos/pandas-dev/pandas/pulls/46926 | 2022-05-02T16:00:08Z | 2022-05-02T20:10:43Z | 2022-05-02T20:10:42Z | 2022-05-02T20:48:29Z |
Backport PR #46912 on branch 1.4.x (CI: More targeted pyarrow version testing) | diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
index 0a914dd965a5e..f5cbb0e88ff11 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/posix.yml
@@ -62,6 +62,15 @@ jobs:
pattern: "not slow and not network and not single_cpu"
pandas_testing_mode: "deprecate"
test_args: "-W error::DeprecationWarning:numpy"
+ exclude:
+ - env_file: actions-39.yaml
+ pyarrow_version: "6"
+ - env_file: actions-39.yaml
+ pyarrow_version: "7"
+ - env_file: actions-310.yaml
+ pyarrow_version: "6"
+ - env_file: actions-310.yaml
+ pyarrow_version: "7"
fail-fast: false
name: ${{ matrix.name || format('{0} pyarrow={1} {2}', matrix.env_file, matrix.pyarrow_version, matrix.pattern) }}
env:
| Backport PR #46912: CI: More targeted pyarrow version testing | https://api.github.com/repos/pandas-dev/pandas/pulls/46925 | 2022-05-02T15:01:10Z | 2022-05-02T20:31:43Z | 2022-05-02T20:31:43Z | 2022-05-02T20:31:44Z |
STYL: a few cleanups in pyi files | diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi
index 5c7be5e660fd9..a6d593076777d 100644
--- a/pandas/_libs/hashtable.pyi
+++ b/pandas/_libs/hashtable.pyi
@@ -144,26 +144,13 @@ class HashTable:
np.ndarray, # np.ndarray[subclass-specific]
npt.NDArray[np.intp],
] | np.ndarray: ... # np.ndarray[subclass-specific]
- def _unique(
- self,
- values: np.ndarray, # np.ndarray[subclass-specific]
- uniques, # FooVector
- count_prior: int = ...,
- na_sentinel: int = ...,
- na_value: object = ...,
- ignore_na: bool = ...,
- return_inverse: bool = ...,
- ) -> tuple[
- np.ndarray, # np.ndarray[subclass-specific]
- npt.NDArray[np.intp],
- ] | np.ndarray: ... # np.ndarray[subclass-specific]
def factorize(
self,
values: np.ndarray, # np.ndarray[subclass-specific]
na_sentinel: int = ...,
na_value: object = ...,
mask=...,
- ) -> tuple[np.ndarray, npt.NDArray[np.intp],]: ... # np.ndarray[subclass-specific]
+ ) -> tuple[np.ndarray, npt.NDArray[np.intp]]: ... # np.ndarray[subclass-specific]
class Complex128HashTable(HashTable): ...
class Complex64HashTable(HashTable): ...
@@ -175,7 +162,7 @@ class Int64HashTable(HashTable):
def get_labels_groupby(
self,
values: npt.NDArray[np.int64], # const int64_t[:]
- ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64],]: ...
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]: ...
def map_keys_to_values(
self,
keys: npt.NDArray[np.int64],
@@ -198,13 +185,13 @@ def duplicated(
keep: Literal["last", "first", False] = ...,
) -> npt.NDArray[np.bool_]: ...
def mode(
- values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None
+ values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = ...
) -> np.ndarray: ...
def value_count(
values: np.ndarray,
dropna: bool,
- mask: npt.NDArray[np.bool_] | None = None,
-) -> tuple[np.ndarray, npt.NDArray[np.int64],]: ... # np.ndarray[same-as-values]
+ mask: npt.NDArray[np.bool_] | None = ...,
+) -> tuple[np.ndarray, npt.NDArray[np.int64]]: ... # np.ndarray[same-as-values]
# arr and values should have same dtype
def ismember(
diff --git a/pandas/_libs/tslibs/conversion.pyi b/pandas/_libs/tslibs/conversion.pyi
index 3d0288160e386..ca6f301673f33 100644
--- a/pandas/_libs/tslibs/conversion.pyi
+++ b/pandas/_libs/tslibs/conversion.pyi
@@ -14,7 +14,7 @@ class OutOfBoundsTimedelta(ValueError): ...
def precision_from_unit(
unit: str,
-) -> tuple[int, int,]: ... # (int64_t, _)
+) -> tuple[int, int]: ... # (int64_t, _)
def ensure_datetime64ns(
arr: np.ndarray, # np.ndarray[datetime64[ANY]]
copy: bool = ...,
@@ -25,5 +25,5 @@ def ensure_timedelta64ns(
) -> np.ndarray: ... # np.ndarray[timedelta64ns]
def datetime_to_datetime64(
values: npt.NDArray[np.object_],
-) -> tuple[np.ndarray, tzinfo | None,]: ... # (np.ndarray[dt64ns], _)
+) -> tuple[np.ndarray, tzinfo | None]: ... # (np.ndarray[dt64ns], _)
def localize_pydatetime(dt: datetime, tz: tzinfo | None) -> datetime: ...
diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi
index ab66677a8be3a..31ed25791389f 100644
--- a/pandas/_libs/tslibs/dtypes.pyi
+++ b/pandas/_libs/tslibs/dtypes.pyi
@@ -1,7 +1,5 @@
from enum import Enum
-from pandas._libs.tslibs.offsets import BaseOffset
-
# These are not public API, but are exposed in the .pyi file because they
# are imported in tests.
_attrname_to_abbrevs: dict[str, str]
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index c5ffaba37f51f..13daba5cfcbdf 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -16,7 +16,6 @@ import numpy as np
from pandas._libs.tslibs import (
BaseOffset,
- NaTType,
Period,
Tick,
Timedelta,
diff --git a/pandas/conftest.py b/pandas/conftest.py
index ecdc0f10b1f56..9d98478010c97 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -17,7 +17,6 @@
- Dtypes
- Misc
"""
-# pyright: reportUntypedFunctionDecorator = false
from collections import abc
from datetime import (
diff --git a/pyproject.toml b/pyproject.toml
index c5f89076a29fa..7b32c5f8eab49 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -153,34 +153,21 @@ pythonVersion = "3.8"
typeCheckingMode = "basic"
include = ["pandas", "typings"]
exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version"]
+# enable subset of "strict"
+reportDuplicateImport = true
+reportInvalidStubStatement = true
+reportPropertyTypeMismatch = true
+reportUntypedClassDecorator = true
+reportUntypedFunctionDecorator = true
+reportUntypedNamedTuple = true
+# disable subset of "basic"
reportGeneralTypeIssues = false
-reportConstantRedefinition = false
-reportFunctionMemberAccess = false
-reportImportCycles = false
-reportIncompatibleMethodOverride = false
-reportIncompatibleVariableOverride = false
reportMissingModuleSource = false
-reportMissingParameterType = false
-reportMissingTypeArgument = false
-reportMissingTypeStubs = false
reportOptionalCall = false
reportOptionalIterable = false
reportOptionalMemberAccess = false
reportOptionalOperand = false
reportOptionalSubscript = false
reportPrivateImportUsage = false
-reportPrivateUsage = false
reportUnboundVariable = false
-reportUnknownArgumentType = false
-reportUnknownLambdaType = false
-reportUnknownMemberType = false
-reportUnknownParameterType = false
-reportUnknownVariableType = false
-reportUnnecessaryComparison = false
-reportUnnecessaryIsInstance = false
reportUnsupportedDunderAll = false
-reportUntypedBaseClass = false
-reportUnusedClass = false
-reportUnusedFunction = false
-reportUnusedImport = false
-reportUnusedVariable = false
| Might make sense to run flake8 on pyi files (add flake8-pyi to pre-commit) | https://api.github.com/repos/pandas-dev/pandas/pulls/46921 | 2022-05-01T21:53:51Z | 2022-05-03T09:36:56Z | 2022-05-03T09:36:56Z | 2022-05-26T01:59:22Z |
TYP: fix a few annotations in offsets.pyi | diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi
index 4cc301018e8f8..9410379b16ba2 100644
--- a/pandas/_libs/tslibs/offsets.pyi
+++ b/pandas/_libs/tslibs/offsets.pyi
@@ -9,9 +9,7 @@ from typing import (
Any,
Collection,
Literal,
- Tuple,
TypeVar,
- Union,
overload,
)
@@ -101,7 +99,7 @@ def _get_offset(name: str) -> BaseOffset: ...
class SingleConstructorOffset(BaseOffset):
@classmethod
- def _from_name(cls, suffix=...): ...
+ def _from_name(cls, suffix: None = ...): ...
def __reduce__(self): ...
@overload
@@ -132,7 +130,7 @@ class RelativeDeltaOffset(BaseOffset):
class BusinessMixin(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., offset: timedelta = ...
- ): ...
+ ) -> None: ...
class BusinessDay(BusinessMixin): ...
@@ -144,14 +142,17 @@ class BusinessHour(BusinessMixin):
start: str | Collection[str] = ...,
end: str | Collection[str] = ...,
offset: timedelta = ...,
- ): ...
+ ) -> None: ...
-class WeekOfMonthMixin(SingleConstructorOffset): ...
+class WeekOfMonthMixin(SingleConstructorOffset):
+ def __init__(
+ self, n: int = ..., normalize: bool = ..., weekday: int = ...
+ ) -> None: ...
class YearOffset(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., month: int | None = ...
- ): ...
+ ) -> None: ...
class BYearEnd(YearOffset): ...
class BYearBegin(YearOffset): ...
@@ -186,7 +187,11 @@ class Week(SingleConstructorOffset):
self, n: int = ..., normalize: bool = ..., weekday: int | None = ...
) -> None: ...
-class WeekOfMonth(WeekOfMonthMixin): ...
+class WeekOfMonth(WeekOfMonthMixin):
+ def __init__(
+ self, n: int = ..., normalize: bool = ..., week: int = ..., weekday: int = ...
+ ) -> None: ...
+
class LastWeekOfMonth(WeekOfMonthMixin): ...
class FY5253Mixin(SingleConstructorOffset):
@@ -196,11 +201,22 @@ class FY5253Mixin(SingleConstructorOffset):
normalize: bool = ...,
weekday: int = ...,
startingMonth: int = ...,
- variation: str = ...,
+ variation: Literal["nearest", "last"] = ...,
) -> None: ...
class FY5253(FY5253Mixin): ...
-class FY5253Quarter(FY5253Mixin): ...
+
+class FY5253Quarter(FY5253Mixin):
+ def __init__(
+ self,
+ n: int = ...,
+ normalize: bool = ...,
+ weekday: int = ...,
+ startingMonth: int = ...,
+ qtr_with_extra_week: int = ...,
+ variation: Literal["nearest", "last"] = ...,
+ ) -> None: ...
+
class Easter(SingleConstructorOffset): ...
class _CustomBusinessMonth(BusinessMixin):
@@ -208,29 +224,35 @@ class _CustomBusinessMonth(BusinessMixin):
self,
n: int = ...,
normalize: bool = ...,
+ weekmask: str = ...,
+ holidays: list | None = ...,
+ calendar: np.busdaycalendar | None = ...,
offset: timedelta = ...,
- holidays: None | list = ...,
- ): ...
+ ) -> None: ...
class CustomBusinessDay(BusinessDay):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
- offset: timedelta = ...,
weekmask: str = ...,
- ): ...
+ holidays: list | None = ...,
+ calendar: np.busdaycalendar | None = ...,
+ offset: timedelta = ...,
+ ) -> None: ...
class CustomBusinessHour(BusinessHour):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
+ weekmask: str = ...,
+ holidays: list | None = ...,
+ calendar: np.busdaycalendar | None = ...,
start: str = ...,
end: str = ...,
offset: timedelta = ...,
- holidays: None | list = ...,
- ): ...
+ ) -> None: ...
class CustomBusinessMonthEnd(_CustomBusinessMonth): ...
class CustomBusinessMonthBegin(_CustomBusinessMonth): ...
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 00ef4fcbf8986..6c96df9a7ea0b 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -3291,7 +3291,7 @@ cdef class CustomBusinessDay(BusinessDay):
holidays : list
List/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``.
- calendar : pd.HolidayCalendar or np.busdaycalendar
+ calendar : np.busdaycalendar
offset : timedelta, default timedelta(0)
"""
@@ -3417,7 +3417,7 @@ cdef class _CustomBusinessMonth(BusinessMixin):
holidays : list
List/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``.
- calendar : pd.HolidayCalendar or np.busdaycalendar
+ calendar : np.busdaycalendar
Calendar to integrate.
offset : timedelta, default timedelta(0)
Time offset to apply.
| - [ ] closes #46908
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Some doc-strings refer to `pd.HolidayCalendar` but that doesn't seem to exist?!
There are a few more un-annotated variables and a few containers (list and dict) without types but I'm not familair enough with them to annotate them. | https://api.github.com/repos/pandas-dev/pandas/pulls/46920 | 2022-05-01T18:18:51Z | 2022-05-03T11:58:49Z | 2022-05-03T11:58:48Z | 2022-05-26T01:59:21Z |
TYP: fix MultiIndex._names type | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 51ca9dbd763b4..6b7aca1b6c4ee 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -10420,12 +10420,14 @@ def _count_level(self, level: Level, axis: int = 0, numeric_only: bool = False):
else:
mask = index_mask.reshape(-1, 1) & values_mask
- if isinstance(level, str):
- level = count_axis._get_level_number(level)
+ if isinstance(level, int):
+ level_number = level
+ else:
+ level_number = count_axis._get_level_number(level)
- level_name = count_axis._names[level]
- level_index = count_axis.levels[level]._rename(name=level_name)
- level_codes = ensure_platform_int(count_axis.codes[level])
+ level_name = count_axis._names[level_number]
+ level_index = count_axis.levels[level_number]._rename(name=level_name)
+ level_codes = ensure_platform_int(count_axis.codes[level_number])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)
if axis == 1:
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 752ce28c58f55..29df930c5aaf3 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -287,7 +287,7 @@ class MultiIndex(Index):
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
- _names = FrozenList()
+ _names: list[Hashable | None] = []
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
@@ -326,9 +326,7 @@ def __new__(
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
- # Incompatible types in assignment (expression has type "List[None]",
- # variable has type "FrozenList") [assignment]
- result._names = [None] * len(levels) # type: ignore[assignment]
+ result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
@@ -1476,8 +1474,7 @@ def _set_names(self, names, *, level=None, validate: bool = True):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
- # error: Cannot determine type of '__setitem__'
- self._names[lev] = name # type: ignore[has-type]
+ self._names[lev] = name
# If .levels has been accessed, the names in our cache will be stale.
self._reset_cache()
| xref https://github.com/pandas-dev/pandas/issues/37715
_names was never really a FrozenList.
Also adapted some related code in _count_level
The change in _count_level is required because `list` cannot be indexed by `Hashable` - this issue had not been detected by mypy up until now because the argument of `__getitem__` is untyped for `FrozenList`. | https://api.github.com/repos/pandas-dev/pandas/pulls/46919 | 2022-05-01T16:40:03Z | 2022-05-21T20:04:21Z | 2022-05-21T20:04:20Z | 2022-05-21T20:04:25Z |
REF: stronger typing in _box_func | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 5c8c6d7fe23a3..5859f051ab343 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -538,13 +538,10 @@ def _check_compatible_with(self, other, setitem: bool = False):
# -----------------------------------------------------------------
# Descriptive Properties
- def _box_func(self, x) -> Timestamp | NaTType:
- if isinstance(x, np.datetime64):
- # GH#42228
- # Argument 1 to "signedinteger" has incompatible type "datetime64";
- # expected "Union[SupportsInt, Union[str, bytes], SupportsIndex]"
- x = np.int64(x) # type: ignore[arg-type]
- ts = Timestamp(x, tz=self.tz)
+ def _box_func(self, x: np.datetime64) -> Timestamp | NaTType:
+ # GH#42228
+ value = x.view("i8")
+ ts = Timestamp(value, tz=self.tz)
# Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if ts is not NaT: # type: ignore[comparison-overlap]
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 2c6e7119b478d..1f55842050df0 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -154,7 +154,7 @@ class TimedeltaArray(dtl.TimelikeOps):
# Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)
# operates pointwise.
- def _box_func(self, x) -> Timedelta | NaTType:
+ def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType:
return Timedelta(x, unit="ns")
@property
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 1a897dba6ac80..713d80c26ef7a 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -16,7 +16,6 @@
from pandas._libs import (
NaT,
NaTType,
- Timedelta,
iNaT,
lib,
)
@@ -367,19 +366,23 @@ def _wrap_results(result, dtype: np.dtype, fill_value=None):
result = np.datetime64("NaT", "ns")
else:
result = np.int64(result).view("datetime64[ns]")
+ # retain original unit
+ result = result.astype(dtype, copy=False)
else:
# If we have float dtype, taking a view will give the wrong result
result = result.astype(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
- if result == fill_value:
- result = np.nan
+ if result == fill_value or np.isnan(result):
+ result = np.timedelta64("NaT").astype(dtype)
- # raise if we have a timedelta64[ns] which is too large
- if np.fabs(result) > lib.i8max:
+ elif np.fabs(result) > lib.i8max:
+ # raise if we have a timedelta64[ns] which is too large
raise ValueError("overflow in timedelta operation")
+ else:
+ # return a timedelta64 with the original unit
+ result = np.int64(result).astype(dtype, copy=False)
- result = Timedelta(result, unit="ns")
else:
result = result.astype("m8[ns]").view(dtype)
@@ -641,7 +644,7 @@ def _mask_datetimelike_result(
result[axis_mask] = iNaT # type: ignore[index]
else:
if mask.any():
- return NaT
+ return np.int64(iNaT).view(orig_values.dtype)
return result
diff --git a/pandas/tests/arrays/timedeltas/test_reductions.py b/pandas/tests/arrays/timedeltas/test_reductions.py
index 586a9187fc169..72d45f5b9a78c 100644
--- a/pandas/tests/arrays/timedeltas/test_reductions.py
+++ b/pandas/tests/arrays/timedeltas/test_reductions.py
@@ -147,7 +147,7 @@ def test_std(self, add):
if getattr(arr, "tz", None) is None:
result = nanops.nanstd(np.asarray(arr), skipna=True)
- assert isinstance(result, Timedelta)
+ assert isinstance(result, np.timedelta64)
assert result == expected
result = arr.std(skipna=False)
@@ -158,7 +158,8 @@ def test_std(self, add):
if getattr(arr, "tz", None) is None:
result = nanops.nanstd(np.asarray(arr), skipna=False)
- assert result is pd.NaT
+ assert isinstance(result, np.timedelta64)
+ assert np.isnat(result)
def test_median(self):
tdi = pd.TimedeltaIndex(["0H", "3H", "NaT", "5H06m", "0H", "2H"])
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 240b9dacce73a..005f7b088271f 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -1020,7 +1020,8 @@ def test_nanmean_skipna_false(self, dtype):
arr[-1, -1] = "NaT"
result = nanops.nanmean(arr, skipna=False)
- assert result is pd.NaT
+ assert np.isnat(result)
+ assert result.dtype == dtype
result = nanops.nanmean(arr, axis=0, skipna=False)
expected = np.array([4, 5, "NaT"], dtype=arr.dtype)
| Necessary for non-nano support | https://api.github.com/repos/pandas-dev/pandas/pulls/46917 | 2022-05-01T15:22:50Z | 2022-05-02T14:58:31Z | 2022-05-02T14:58:31Z | 2022-05-02T15:47:23Z |
⬆️ UPGRADE: Autoupdate pre-commit config | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 767ef62bb1758..bd095c03e6fdb 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -26,7 +26,7 @@ repos:
types_or: [python, rst, markdown]
files: ^(pandas|doc)/
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.1.0
+ rev: v4.2.0
hooks:
- id: debug-statements
- id: end-of-file-fixer
@@ -56,7 +56,7 @@ repos:
hooks:
- id: isort
- repo: https://github.com/asottile/pyupgrade
- rev: v2.31.1
+ rev: v2.32.0
hooks:
- id: pyupgrade
args: [--py38-plus]
@@ -71,7 +71,7 @@ repos:
types: [text] # overwrite types: [rst]
types_or: [python, rst]
- repo: https://github.com/sphinx-contrib/sphinx-lint
- rev: v0.2
+ rev: v0.4.1
hooks:
- id: sphinx-lint
- repo: https://github.com/asottile/yesqa
| <!-- START pr-commits -->
<!-- END pr-commits -->
## Base PullRequest
default branch (https://github.com/pandas-dev/pandas/tree/main)
## Command results
<details>
<summary>Details: </summary>
<details>
<summary><em>add path</em></summary>
```Shell
/home/runner/work/_actions/technote-space/create-pr-action/v2/node_modules/npm-check-updates/build/src/bin
```
</details>
<details>
<summary><em>pip install pre-commit</em></summary>
```Shell
Collecting pre-commit
Downloading pre_commit-2.18.1-py2.py3-none-any.whl (197 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 197.8/197.8 KB 23.1 MB/s eta 0:00:00
Collecting cfgv>=2.0.0
Downloading cfgv-3.3.1-py2.py3-none-any.whl (7.3 kB)
Collecting identify>=1.0.0
Downloading identify-2.5.0-py2.py3-none-any.whl (98 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 98.6/98.6 KB 21.6 MB/s eta 0:00:00
Collecting nodeenv>=0.11.1
Downloading nodeenv-1.6.0-py2.py3-none-any.whl (21 kB)
Collecting pyyaml>=5.1
Downloading PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (682 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 682.2/682.2 KB 43.2 MB/s eta 0:00:00
Collecting virtualenv>=20.0.8
Downloading virtualenv-20.14.1-py2.py3-none-any.whl (8.8 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 8.8/8.8 MB 98.1 MB/s eta 0:00:00
Collecting toml
Downloading toml-0.10.2-py2.py3-none-any.whl (16 kB)
Collecting filelock<4,>=3.2
Downloading filelock-3.6.0-py3-none-any.whl (10.0 kB)
Collecting six<2,>=1.9.0
Downloading six-1.16.0-py2.py3-none-any.whl (11 kB)
Collecting distlib<1,>=0.3.1
Downloading distlib-0.3.4-py2.py3-none-any.whl (461 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 461.2/461.2 KB 68.9 MB/s eta 0:00:00
Collecting platformdirs<3,>=2
Downloading platformdirs-2.5.2-py3-none-any.whl (14 kB)
Installing collected packages: nodeenv, distlib, toml, six, pyyaml, platformdirs, identify, filelock, cfgv, virtualenv, pre-commit
Successfully installed cfgv-3.3.1 distlib-0.3.4 filelock-3.6.0 identify-2.5.0 nodeenv-1.6.0 platformdirs-2.5.2 pre-commit-2.18.1 pyyaml-6.0 six-1.16.0 toml-0.10.2 virtualenv-20.14.1
```
</details>
<details>
<summary><em>pre-commit autoupdate || (exit 0);</em></summary>
```Shell
Updating https://github.com/MarcoGorelli/absolufy-imports ... [INFO] Initializing environment for https://github.com/MarcoGorelli/absolufy-imports.
already up to date.
Updating https://github.com/jendrikseipp/vulture ... [INFO] Initializing environment for https://github.com/jendrikseipp/vulture.
already up to date.
Updating https://github.com/python/black ... [INFO] Initializing environment for https://github.com/python/black.
already up to date.
Updating https://github.com/codespell-project/codespell ... [INFO] Initializing environment for https://github.com/codespell-project/codespell.
already up to date.
Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks.
updating v4.1.0 -> v4.2.0.
Updating https://github.com/cpplint/cpplint ... [INFO] Initializing environment for https://github.com/cpplint/cpplint.
already up to date.
Updating https://github.com/PyCQA/flake8 ... [INFO] Initializing environment for https://github.com/PyCQA/flake8.
already up to date.
Updating https://github.com/PyCQA/isort ... [INFO] Initializing environment for https://github.com/PyCQA/isort.
already up to date.
Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade.
updating v2.31.1 -> v2.32.0.
Updating https://github.com/pre-commit/pygrep-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pygrep-hooks.
already up to date.
Updating https://github.com/sphinx-contrib/sphinx-lint ... [INFO] Initializing environment for https://github.com/sphinx-contrib/sphinx-lint.
updating v0.2 -> v0.4.1.
Updating https://github.com/asottile/yesqa ... [INFO] Initializing environment for https://github.com/asottile/yesqa.
already up to date.
```
</details>
<details>
<summary><em>pre-commit run -a || (exit 0);</em></summary>
```Shell
[INFO] Initializing environment for https://github.com/PyCQA/flake8:flake8-bugbear==21.3.2,flake8-comprehensions==3.7.0,flake8==4.0.1,pandas-dev-flaker==0.5.0.
[INFO] Initializing environment for https://github.com/asottile/yesqa:flake8-bugbear==21.3.2,flake8-comprehensions==3.7.0,flake8==4.0.1,pandas-dev-flaker==0.5.0.
[INFO] Initializing environment for local:pyright@1.1.230.
[INFO] Initializing environment for local:flake8-rst==0.7.0,flake8==3.7.9.
[INFO] Initializing environment for local:pyyaml,toml.
[INFO] Initializing environment for local:pyyaml.
[INFO] Initializing environment for local.
[INFO] Installing environment for https://github.com/MarcoGorelli/absolufy-imports.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/jendrikseipp/vulture.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/python/black.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/codespell-project/codespell.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/cpplint/cpplint.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/PyCQA/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/PyCQA/isort.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/pyupgrade.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/sphinx-contrib/sphinx-lint.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/yesqa.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
absolufy-imports........................................................................................Passed
vulture.................................................................................................Passed
black...................................................................................................Passed
codespell...............................................................................................Passed
debug statements (python)...............................................................................Passed
fix end of files........................................................................................Passed
trim trailing whitespace................................................................................Passed
cpplint.................................................................................................Passed
flake8..................................................................................................Passed
isort...................................................................................................Passed
pyupgrade...............................................................................................Passed
rst ``code`` is two backticks...........................................................................Passed
rst directives end with two colons......................................................................Passed
rst ``inline code`` next to normal text.................................................................Passed
Sphinx lint.............................................................................................Passed
Strip unnecessary `# noqa`s.............................................................................Passed
flake8-rst..............................................................................................Failed
- hook id: flake8-rst
- exit code: 1
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 486, in run_ast_checks
ast = self.processor.build_ast()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/processor.py", line 212, in build_ast
return compile("".join(self.lines), "", "exec", PyCF_ONLY_AST)
File "", line 35
df.plot.<TAB> # noqa: E225, E999
^
SyntaxError: invalid syntax
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/bin/flake8-rst", line 8, in <module>
sys.exit(main())
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8_rst/cli.py", line 16, in main
app.run(argv)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 393, in run
self._run(argv)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 381, in _run
self.run_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 300, in run_checks
self.file_checker_manager.run()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 331, in run
self.run_serial()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 315, in run_serial
checker.run_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 598, in run_checks
self.run_ast_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 488, in run_ast_checks
row, column = self._extract_syntax_information(e)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 473, in _extract_syntax_information
lines = physical_line.rstrip("\n").split("\n")
AttributeError: 'int' object has no attribute 'rstrip'
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 486, in run_ast_checks
ast = self.processor.build_ast()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/processor.py", line 212, in build_ast
return compile("".join(self.lines), "", "exec", PyCF_ONLY_AST)
File "", line 203
df.foo<TAB> # noqa: E225, E999
^^^^^^^^^^^^^^^^^^
SyntaxError: invalid syntax
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/bin/flake8-rst", line 8, in <module>
sys.exit(main())
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8_rst/cli.py", line 16, in main
app.run(argv)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 393, in run
self._run(argv)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 381, in _run
self.run_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/main/application.py", line 300, in run_checks
self.file_checker_manager.run()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 331, in run
self.run_serial()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 315, in run_serial
checker.run_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 598, in run_checks
self.run_ast_checks()
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 488, in run_ast_checks
row, column = self._extract_syntax_information(e)
File "/home/runner/.cache/pre-commit/repo5hy5wpdj/py_env-python3.10/lib/python3.10/site-packages/flake8/checker.py", line 473, in _extract_syntax_information
lines = physical_line.rstrip("\n").split("\n")
AttributeError: 'int' object has no attribute 'rstrip'
Unwanted patterns.......................................................................................Passed
Check Cython casting is `<type>obj`, not `<type> obj`...................................................Passed
Check for backticks incorrectly rendering because of missing spaces.....................................Passed
Check for unnecessary random seeds in asv benchmarks....................................................Passed
Check for usage of numpy testing or array_equal.........................................................Passed
Check for invalid EA testing............................................................................Passed
Generate pip dependency from conda......................................................................Passed
Check flake8 version is synced across flake8, yesqa, and environment.yml................................Passed
Validate correct capitalization among titles in documentation...........................................Passed
Import pandas.array as pd_array in core.................................................................Passed
Use pandas.io.common.urlopen instead of urllib.request.urlopen..........................................Passed
Use bool_t instead of bool in pandas/core/generic.py....................................................Passed
Ensure pandas errors are documented in doc/source/reference/testing.rst.................................Passed
Check for pg8000 not installed on CI for test_pg8000_sqlalchemy_passthrough_error.......................Passed
Check minimum version of dependencies are aligned.......................................................Passed
```
</details>
</details>
## Changed files
<details>
<summary>Changed file: </summary>
- .pre-commit-config.yaml
</details>
<hr>
[:octocat: Repo](https://github.com/technote-space/create-pr-action) | [:memo: Issues](https://github.com/technote-space/create-pr-action/issues) | [:department_store: Marketplace](https://github.com/marketplace/actions/create-pr-action) | https://api.github.com/repos/pandas-dev/pandas/pulls/46915 | 2022-05-01T07:08:27Z | 2022-05-01T09:15:29Z | 2022-05-01T09:15:29Z | 2022-05-07T03:09:21Z |
TYP: fix type annotation in _has_externally_shared_axis | diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 30af4f90d6869..bfbf77e85afd3 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -328,13 +328,13 @@ def _remove_labels_from_axis(axis: Axis):
axis.get_label().set_visible(False)
-def _has_externally_shared_axis(ax1: matplotlib.axes, compare_axis: str) -> bool:
+def _has_externally_shared_axis(ax1: Axes, compare_axis: str) -> bool:
"""
Return whether an axis is externally shared.
Parameters
----------
- ax1 : matplotlib.axes
+ ax1 : matplotlib.axes.Axes
Axis to query.
compare_axis : str
`"x"` or `"y"` according to whether the X-axis or Y-axis is being
| `from matplotlib import axes` is a module, not the axes class. | https://api.github.com/repos/pandas-dev/pandas/pulls/46914 | 2022-05-01T04:17:25Z | 2022-05-07T02:29:24Z | 2022-05-07T02:29:24Z | 2022-05-26T01:59:20Z |
CI: More targeted pyarrow version testing | diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml
index 8a16ef4020b14..f5cbb0e88ff11 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/posix.yml
@@ -28,7 +28,7 @@ jobs:
pattern: ["not single_cpu", "single_cpu"]
# Don't test pyarrow v2/3: Causes timeouts in read_csv engine
# even if tests are skipped/xfailed
- pyarrow_version: ["5", "7"]
+ pyarrow_version: ["5", "6", "7"]
include:
- name: "Downstream Compat"
env_file: actions-38-downstream_compat.yaml
@@ -62,6 +62,15 @@ jobs:
pattern: "not slow and not network and not single_cpu"
pandas_testing_mode: "deprecate"
test_args: "-W error::DeprecationWarning:numpy"
+ exclude:
+ - env_file: actions-39.yaml
+ pyarrow_version: "6"
+ - env_file: actions-39.yaml
+ pyarrow_version: "7"
+ - env_file: actions-310.yaml
+ pyarrow_version: "6"
+ - env_file: actions-310.yaml
+ pyarrow_version: "7"
fail-fast: false
name: ${{ matrix.name || format('{0} pyarrow={1} {2}', matrix.env_file, matrix.pyarrow_version, matrix.pattern) }}
env:
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
Hoping to supersede https://github.com/pandas-dev/pandas/pull/46386. cc @lithomas1
While the job queue bandwidth is larger now, probably don't need to test different pyarrow versions with different Python versions. Instead:
* PY38 will test pyarrow 5, 6, 7
* PY39 and PY310 will just test pyarrow 5 | https://api.github.com/repos/pandas-dev/pandas/pulls/46912 | 2022-05-01T00:56:14Z | 2022-05-02T14:59:26Z | 2022-05-02T14:59:25Z | 2022-05-02T21:37:08Z |
DOC: update shortened link to full (#46899) | diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index 1904ce32f3170..7421645baa463 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -434,9 +434,9 @@ def _str_rstrip(self, to_strip=None):
return self._str_map(lambda x: x.rstrip(to_strip))
def _str_removeprefix(self, prefix: str) -> Series:
- # outstanding question on whether to use native methods for users
- # on Python 3.9+ https://bit.ly/3LuMeRn, in which case we could do
- # return self._str_map(str.removeprefix)
+ # outstanding question on whether to use native methods for users on Python 3.9+
+ # https://github.com/pandas-dev/pandas/pull/39226#issuecomment-836719770,
+ # in which case we could do return self._str_map(str.removeprefix)
def removeprefix(text: str) -> str:
if text.startswith(prefix):
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46907 | 2022-04-30T16:13:27Z | 2022-04-30T18:37:20Z | 2022-04-30T18:37:20Z | 2022-04-30T18:37:37Z |
DEPR: numeric_only default in DataFrame methods with None/True | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 931d18dc349f3..aa42d4236484b 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -120,7 +120,7 @@ Other enhancements
- :meth:`DataFrame.reset_index` now accepts a ``names`` argument which renames the index names (:issue:`6878`)
- :meth:`pd.concat` now raises when ``levels`` is given but ``keys`` is None (:issue:`46653`)
- :meth:`pd.concat` now raises when ``levels`` contains duplicate values (:issue:`46653`)
-- Added ``numeric_only`` argument to :meth:`DataFrame.corr`, :meth:`DataFrame.corrwith`, :meth:`DataFrame.cov`, :meth:`DataFrame.idxmin`, :meth:`DataFrame.idxmax`, :meth:`.GroupBy.idxmin`, :meth:`.GroupBy.idxmax`, :meth:`.GroupBy.var`, :meth:`.GroupBy.std`, :meth:`.GroupBy.sem`, and :meth:`.GroupBy.quantile` (:issue:`46560`)
+- Added ``numeric_only`` argument to :meth:`DataFrame.corr`, :meth:`DataFrame.corrwith`, :meth:`DataFrame.cov`, :meth:`DataFrame.idxmin`, :meth:`DataFrame.idxmax`, :meth:`.DataFrameGroupBy.idxmin`, :meth:`.DataFrameGroupBy.idxmax`, :meth:`.GroupBy.var`, :meth:`.GroupBy.std`, :meth:`.GroupBy.sem`, and :meth:`.DataFrameGroupBy.quantile` (:issue:`46560`)
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`, :issue:`46725`)
- Added ``validate`` argument to :meth:`DataFrame.join` (:issue:`46622`)
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`)
@@ -426,6 +426,48 @@ As ``group_keys=True`` is the default value of :meth:`DataFrame.groupby` and
raise a ``FutureWarning``. This can be silenced and the previous behavior
retained by specifying ``group_keys=False``.
+.. _whatsnew_150.deprecations.numeric_only_default:
+
+``numeric_only`` default value
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Across the DataFrame operations such as ``min``, ``sum``, and ``idxmax``, the default
+value of the ``numeric_only`` argument, if it exists at all, was inconsistent.
+Furthermore, operations with the default value ``None`` can lead to surprising
+results. (:issue:`46560`)
+
+.. code-block:: ipython
+
+ In [1]: df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
+
+ In [2]: # Reading the next line without knowing the contents of df, one would
+ # expect the result to contain the products for both columns a and b.
+ df[["a", "b"]].prod()
+ Out[2]:
+ a 2
+ dtype: int64
+
+To avoid this behavior, the specifying the value ``numeric_only=None`` has been
+deprecated, and will be removed in a future version of pandas. In the future,
+all operations with a ``numeric_only`` argument will default to ``False``. Users
+should either call the operation only with columns that can be operated on, or
+specify ``numeric_only=True`` to operate only on Boolean, integer, and float columns.
+
+In order to support the transition to the new behavior, the following methods have
+gained the ``numeric_only`` argument.
+
+- :meth:`DataFrame.corr`
+- :meth:`DataFrame.corrwith`
+- :meth:`DataFrame.cov`
+- :meth:`DataFrame.idxmin`
+- :meth:`DataFrame.idxmax`
+- :meth:`.DataFrameGroupBy.idxmin`
+- :meth:`.DataFrameGroupBy.idxmax`
+- :meth:`.GroupBy.var`
+- :meth:`.GroupBy.std`
+- :meth:`.GroupBy.sem`
+- :meth:`.DataFrameGroupBy.quantile`
+
.. _whatsnew_150.deprecations.other:
Other Deprecations
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 90f665362ef56..098b501cc95c9 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -635,3 +635,63 @@ def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]:
list of column names with the None values replaced.
"""
return [f"level_{i}" if name is None else name for i, name in enumerate(names)]
+
+
+def resolve_numeric_only(numeric_only: bool | None | lib.NoDefault) -> bool:
+ """Determine the Boolean value of numeric_only.
+
+ See GH#46560 for details on the deprecation.
+
+ Parameters
+ ----------
+ numeric_only : bool, None, or lib.no_default
+ Value passed to the method.
+
+ Returns
+ -------
+ Resolved value of numeric_only.
+ """
+ if numeric_only is lib.no_default:
+ # Methods that behave like numeric_only=True and only got the numeric_only
+ # arg in 1.5.0 default to lib.no_default
+ result = True
+ elif numeric_only is None:
+ # Methods that had the numeric_only arg prior to 1.5.0 and try all columns
+ # first default to None
+ result = False
+ else:
+ result = cast(bool, numeric_only)
+ return result
+
+
+def deprecate_numeric_only_default(cls: type, name: str, deprecate_none: bool = False):
+ """Emit FutureWarning message for deprecation of numeric_only.
+
+ See GH#46560 for details on the deprecation.
+
+ Parameters
+ ----------
+ cls : type
+ pandas type that is generating the warning.
+ name : str
+ Name of the method that is generating the warning.
+ deprecate_none : bool, default False
+ Whether to also warn about the deprecation of specifying ``numeric_only=None``.
+ """
+ if name in ["all", "any"]:
+ arg_name = "bool_only"
+ else:
+ arg_name = "numeric_only"
+
+ msg = (
+ f"The default value of {arg_name} in {cls.__name__}.{name} is "
+ "deprecated. In a future version, it will default to False. "
+ )
+ if deprecate_none:
+ msg += f"In addition, specifying '{arg_name}=None' is deprecated. "
+ msg += (
+ f"Select only valid columns or specify the value of {arg_name} to silence "
+ "this warning."
+ )
+
+ warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ef5e6dd1d6757..84ea8df0b9b20 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9833,7 +9833,7 @@ def corr(
self,
method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson",
min_periods: int = 1,
- numeric_only: bool = True,
+ numeric_only: bool | lib.NoDefault = lib.no_default,
) -> DataFrame:
"""
Compute pairwise correlation of columns, excluding NA/null values.
@@ -9859,6 +9859,10 @@ def corr(
.. versionadded:: 1.5.0
+ .. deprecated:: 1.5.0
+ The default value of ``numeric_only`` will be ``False`` in a future
+ version of pandas.
+
Returns
-------
DataFrame
@@ -9897,10 +9901,11 @@ def corr(
dogs 1.0 NaN
cats NaN 1.0
""" # noqa:E501
- if numeric_only:
- data = self._get_numeric_data()
- else:
- data = self
+ numeric_only_bool = com.resolve_numeric_only(numeric_only)
+ data = self._get_numeric_data() if numeric_only_bool else self
+ if numeric_only is lib.no_default and len(data.columns) < len(self.columns):
+ com.deprecate_numeric_only_default(type(self), "corr")
+
cols = data.columns
idx = cols.copy()
mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False)
@@ -9946,7 +9951,7 @@ def cov(
self,
min_periods: int | None = None,
ddof: int | None = 1,
- numeric_only: bool = True,
+ numeric_only: bool | lib.NoDefault = lib.no_default,
) -> DataFrame:
"""
Compute pairwise covariance of columns, excluding NA/null values.
@@ -9983,6 +9988,10 @@ def cov(
.. versionadded:: 1.5.0
+ .. deprecated:: 1.5.0
+ The default value of ``numeric_only`` will be ``False`` in a future
+ version of pandas.
+
Returns
-------
DataFrame
@@ -10051,10 +10060,11 @@ def cov(
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
- if numeric_only:
- data = self._get_numeric_data()
- else:
- data = self
+ numeric_only_bool = com.resolve_numeric_only(numeric_only)
+ data = self._get_numeric_data() if numeric_only_bool else self
+ if numeric_only is lib.no_default and len(data.columns) < len(self.columns):
+ com.deprecate_numeric_only_default(type(self), "cov")
+
cols = data.columns
idx = cols.copy()
mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False)
@@ -10077,7 +10087,7 @@ def corrwith(
axis: Axis = 0,
drop=False,
method="pearson",
- numeric_only: bool = True,
+ numeric_only: bool | lib.NoDefault = lib.no_default,
) -> Series:
"""
Compute pairwise correlation.
@@ -10110,6 +10120,10 @@ def corrwith(
.. versionadded:: 1.5.0
+ .. deprecated:: 1.5.0
+ The default value of ``numeric_only`` will be ``False`` in a future
+ version of pandas.
+
Returns
-------
Series
@@ -10141,10 +10155,10 @@ def corrwith(
dtype: float64
""" # noqa:E501
axis = self._get_axis_number(axis)
- if numeric_only:
- this = self._get_numeric_data()
- else:
- this = self
+ numeric_only_bool = com.resolve_numeric_only(numeric_only)
+ this = self._get_numeric_data() if numeric_only_bool else self
+ if numeric_only is lib.no_default and len(this.columns) < len(self.columns):
+ com.deprecate_numeric_only_default(type(self), "corrwith")
# GH46174: when other is a Series object and axis=0, we achieve a speedup over
# passing .corr() to .apply() by taking the columns as ndarrays and iterating
@@ -10396,7 +10410,6 @@ def _reduce(
filter_type=None,
**kwds,
):
-
assert filter_type is None or filter_type == "bool", filter_type
out_dtype = "bool" if filter_type == "bool" else None
@@ -10451,6 +10464,7 @@ def _get_data() -> DataFrame:
data = self._get_bool_data()
return data
+ numeric_only_bool = com.resolve_numeric_only(numeric_only)
if numeric_only is not None or axis == 0:
# For numeric_only non-None and axis non-None, we know
# which blocks to use and no try/except is needed.
@@ -10458,7 +10472,7 @@ def _get_data() -> DataFrame:
# dtypes are unambiguous can be handled with BlockManager.reduce
# Case with EAs see GH#35881
df = self
- if numeric_only is True:
+ if numeric_only_bool:
df = _get_data()
if axis == 1:
df = df.T
@@ -10479,16 +10493,8 @@ def _get_data() -> DataFrame:
if numeric_only is None and out.shape[0] != df.shape[1]:
# columns have been dropped GH#41480
- arg_name = "numeric_only"
- if name in ["all", "any"]:
- arg_name = "bool_only"
- warnings.warn(
- "Dropping of nuisance columns in DataFrame reductions "
- f"(with '{arg_name}=None') is deprecated; in a future "
- "version this will raise TypeError. Select only valid "
- "columns before calling the reduction.",
- FutureWarning,
- stacklevel=find_stack_level(),
+ com.deprecate_numeric_only_default(
+ type(self), name, deprecate_none=True
)
return out
@@ -10776,6 +10782,11 @@ def quantile(
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
+
+ .. deprecated:: 1.5.0
+ The default value of ``numeric_only`` will be ``False`` in a future
+ version of pandas.
+
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
@@ -10833,15 +10844,8 @@ def quantile(
axis = self._get_axis_number(axis)
any_not_numeric = any(not is_numeric_dtype(x) for x in self.dtypes)
if numeric_only is no_default and any_not_numeric:
- warnings.warn(
- "In future versions of pandas, numeric_only will be set to "
- "False by default, and the datetime/timedelta columns will "
- "be considered in the results. To not consider these columns"
- "specify numeric_only=True.",
- FutureWarning,
- stacklevel=find_stack_level(),
- )
- numeric_only = True
+ com.deprecate_numeric_only_default(type(self), "quantile")
+ numeric_only = com.resolve_numeric_only(numeric_only)
if not is_list_like(q):
# BlockManager.quantile expects listlike, so we wrap and unwrap here
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c615216240d60..1a31a50606c2c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -11554,6 +11554,11 @@ def _doc_params(cls):
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
+
+ .. deprecated:: 1.5.0
+ Specifying ``numeric_only=None`` is deprecated. The default value will be
+ ``False`` in a future version of pandas.
+
{min_count}\
**kwargs
Additional keyword arguments to be passed to the function.
@@ -11584,6 +11589,10 @@ def _doc_params(cls):
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
+ .. deprecated:: 1.5.0
+ Specifying ``numeric_only=None`` is deprecated. The default value will be
+ ``False`` in a future version of pandas.
+
Returns
-------
{name1} or {name2} (if level specified) \
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index 3a86aa05fb227..2f0a4195d2f74 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -41,7 +41,10 @@ def test_cov(self, float_frame, float_string_frame):
tm.assert_almost_equal(result["A"]["C"], expected)
# exclude non-numeric types
- result = float_string_frame.cov()
+ with tm.assert_produces_warning(
+ FutureWarning, match="The default value of numeric_only"
+ ):
+ result = float_string_frame.cov()
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].cov()
tm.assert_frame_equal(result, expected)
@@ -116,7 +119,10 @@ def test_corr_scipy_method(self, float_frame, method):
def test_corr_non_numeric(self, float_string_frame):
# exclude non-numeric types
- result = float_string_frame.corr()
+ with tm.assert_produces_warning(
+ FutureWarning, match="The default value of numeric_only"
+ ):
+ result = float_string_frame.corr()
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].corr()
tm.assert_frame_equal(result, expected)
@@ -307,11 +313,17 @@ def test_corrwith_with_objects(self):
df1["obj"] = "foo"
df2["obj"] = "bar"
- result = df1.corrwith(df2)
+ with tm.assert_produces_warning(
+ FutureWarning, match="The default value of numeric_only"
+ ):
+ result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
- result = df1.corrwith(df2, axis=1)
+ with tm.assert_produces_warning(
+ FutureWarning, match="The default value of numeric_only"
+ ):
+ result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 41deeec7c4b57..7f2a13862f4ed 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -1,11 +1,13 @@
from datetime import timedelta
from decimal import Decimal
+import inspect
import re
from dateutil.tz import tzlocal
import numpy as np
import pytest
+from pandas._libs import lib
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
@@ -1752,7 +1754,9 @@ def test_groupby_regular_arithmetic_equivalent(meth):
def test_frame_mixed_numeric_object_with_timestamp(ts_value):
# GH 13912
df = DataFrame({"a": [1], "b": [1.1], "c": ["foo"], "d": [ts_value]})
- with tm.assert_produces_warning(FutureWarning, match="Dropping of nuisance"):
+ with tm.assert_produces_warning(
+ FutureWarning, match="The default value of numeric_only"
+ ):
result = df.sum()
expected = Series([1, 1.1, "foo"], index=list("abc"))
tm.assert_series_equal(result, expected)
@@ -1786,3 +1790,60 @@ def test_reduction_axis_none_deprecation(method):
expected = meth()
tm.assert_series_equal(res, expected)
tm.assert_series_equal(res, meth(axis=0))
+
+
+@pytest.mark.parametrize(
+ "kernel",
+ [
+ "corr",
+ "corrwith",
+ "count",
+ "cov",
+ "idxmax",
+ "idxmin",
+ "kurt",
+ "kurt",
+ "max",
+ "mean",
+ "median",
+ "min",
+ "mode",
+ "prod",
+ "prod",
+ "quantile",
+ "sem",
+ "skew",
+ "std",
+ "sum",
+ "var",
+ ],
+)
+def test_numeric_only_deprecation(kernel):
+ # GH#46852
+ df = DataFrame({"a": [1, 2, 3], "b": object})
+ args = (df,) if kernel == "corrwith" else ()
+ signature = inspect.signature(getattr(DataFrame, kernel))
+ default = signature.parameters["numeric_only"].default
+ assert default is not True
+
+ if kernel in ("idxmax", "idxmin"):
+ # kernels that default to numeric_only=False and fail on nuisance columns
+ assert default is False
+ with pytest.raises(TypeError, match="not allowed for this dtype"):
+ getattr(df, kernel)(*args)
+ else:
+ if default is None or default is lib.no_default:
+ expected = getattr(df[["a"]], kernel)(*args)
+ warn = FutureWarning
+ else:
+ # default must be False and works on any nuisance columns
+ expected = getattr(df, kernel)(*args)
+ if kernel == "mode":
+ assert "b" in expected.columns
+ else:
+ assert "b" in expected.index
+ warn = None
+ msg = f"The default value of numeric_only in DataFrame.{kernel}"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = getattr(df, kernel)(*args)
+ tm.assert_equal(result, expected)
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index a5834dd237c01..b5bae4759090a 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -807,7 +807,13 @@ def test_frame_downsample_method(method, numeric_only, expected_data):
resampled = df.resample("Y")
func = getattr(resampled, method)
- result = func(numeric_only=numeric_only)
+ if method == "prod" and numeric_only is not True:
+ warn = FutureWarning
+ else:
+ warn = None
+ msg = "Dropping invalid columns in DataFrameGroupBy.prod is deprecated"
+ with tm.assert_produces_warning(warn, match=msg):
+ result = func(numeric_only=numeric_only)
expected = DataFrame(expected_data, index=expected_index)
tm.assert_frame_equal(result, expected)
| - [x] closes #46852 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
groupby ops are up next for deprecation, then onto resample and window. | https://api.github.com/repos/pandas-dev/pandas/pulls/46906 | 2022-04-30T15:47:33Z | 2022-05-05T01:22:56Z | 2022-05-05T01:22:55Z | 2022-07-26T20:44:02Z |
TYP/CI: bump mypy&pyright | diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index d4a2bedcfba1a..7d24b26f5538b 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -74,7 +74,7 @@ jobs:
- name: Install pyright
# note: keep version in sync with .pre-commit-config.yaml
- run: npm install -g pyright@1.1.245
+ run: npm install -g pyright@1.1.247
- name: Build Pandas
id: build
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fac09fcf70511..9469a34c8aacd 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -89,7 +89,7 @@ repos:
types: [python]
stages: [manual]
# note: keep version in sync with .github/workflows/code-checks.yml
- additional_dependencies: ['pyright@1.1.245']
+ additional_dependencies: ['pyright@1.1.247']
- repo: local
hooks:
- id: flake8-rst
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 9f1c4755bc54f..128fd68674f96 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -360,7 +360,7 @@ If installed, we now require:
+-----------------+-----------------+----------+---------+
| Package | Minimum Version | Required | Changed |
+=================+=================+==========+=========+
-| mypy (dev) | 0.941 | | X |
+| mypy (dev) | 0.950 | | X |
+-----------------+-----------------+----------+---------+
diff --git a/environment.yml b/environment.yml
index dc3cba3be2132..b4710e252384c 100644
--- a/environment.yml
+++ b/environment.yml
@@ -24,7 +24,7 @@ dependencies:
- flake8-bugbear=21.3.2 # used by flake8, find likely bugs
- flake8-comprehensions=3.7.0 # used by flake8, linting of unnecessary comprehensions
- isort>=5.2.1 # check that imports are in the right order
- - mypy=0.941
+ - mypy=0.950
- pre-commit>=2.9.2
- pycodestyle # used by flake8
- pyupgrade
diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py
index 2a487fa4b6877..2e1ef31033d71 100644
--- a/pandas/_config/localization.py
+++ b/pandas/_config/localization.py
@@ -45,7 +45,9 @@ def set_locale(
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if all(x is not None for x in normalized_locale):
- yield ".".join(normalized_locale)
+ # error: Argument 1 to "join" of "str" has incompatible type
+ # "Tuple[Optional[str], Optional[str]]"; expected "Iterable[str]"
+ yield ".".join(normalized_locale) # type: ignore[arg-type]
else:
yield new_locale
finally:
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index 13daba5cfcbdf..4be9621a594dc 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -113,7 +113,7 @@ class Timestamp(datetime):
def time(self) -> _time: ...
def timetz(self) -> _time: ...
def replace(
- self,
+ self: _DatetimeT,
year: int = ...,
month: int = ...,
day: int = ...,
@@ -123,7 +123,7 @@ class Timestamp(datetime):
microsecond: int = ...,
tzinfo: _tzinfo | None = ...,
fold: int = ...,
- ) -> datetime: ...
+ ) -> _DatetimeT: ...
def astimezone(self: _DatetimeT, tz: _tzinfo | None = ...) -> _DatetimeT: ...
def ctime(self) -> str: ...
def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ...
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index a7bb9520841b6..3616e3512c6fe 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -957,7 +957,7 @@ def equals(self, other) -> bool:
return array_equivalent(left, right, dtype_equal=True)
def _quantile(
- self: BaseMaskedArrayT, qs: npt.NDArray[np.float64], interpolation: str
+ self, qs: npt.NDArray[np.float64], interpolation: str
) -> BaseMaskedArray:
"""
Dispatch to quantile_with_mask, needed because we do not have
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py
index 2851ea36c8a33..97ec297db8ba8 100644
--- a/pandas/io/parsers/base_parser.py
+++ b/pandas/io/parsers/base_parser.py
@@ -7,6 +7,7 @@
from enum import Enum
import itertools
from typing import (
+ Any,
Callable,
DefaultDict,
Hashable,
@@ -1027,26 +1028,14 @@ def _get_empty_meta(
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
+ dtype_dict: defaultdict[Hashable, Any]
if not is_dict_like(dtype):
# if dtype == None, default will be object.
default_dtype = dtype or object
- # error: Argument 1 to "defaultdict" has incompatible type "Callable[[],
- # Union[ExtensionDtype, str, dtype[Any], Type[object], Dict[Hashable,
- # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float],
- # Type[int], Type[complex], Type[bool], Type[object]]]]]"; expected
- # "Optional[Callable[[], Union[ExtensionDtype, str, dtype[Any],
- # Type[object]]]]"
- # error: Incompatible return value type (got "Union[ExtensionDtype, str,
- # dtype[Any], Type[object], Dict[Hashable, Union[ExtensionDtype, Union[str,
- # dtype[Any]], Type[str], Type[float], Type[int], Type[complex], Type[bool],
- # Type[object]]]]", expected "Union[ExtensionDtype, str, dtype[Any],
- # Type[object]]")
- dtype = defaultdict(
- lambda: default_dtype # type: ignore[arg-type, return-value]
- )
+ dtype_dict = defaultdict(lambda: default_dtype)
else:
dtype = cast(dict, dtype)
- dtype = defaultdict(
+ dtype_dict = defaultdict(
lambda: object,
{columns[k] if is_integer(k) else k: v for k, v in dtype.items()},
)
@@ -1063,14 +1052,16 @@ def _get_empty_meta(
if (index_col is None or index_col is False) or index_names is None:
index = Index([])
else:
- data = [Series([], dtype=dtype[name]) for name in index_names]
+ data = [Series([], dtype=dtype_dict[name]) for name in index_names]
index = ensure_index_from_sequences(data, names=index_names)
index_col.sort()
for i, n in enumerate(index_col):
columns.pop(n - i)
- col_dict = {col_name: Series([], dtype=dtype[col_name]) for col_name in columns}
+ col_dict = {
+ col_name: Series([], dtype=dtype_dict[col_name]) for col_name in columns
+ }
return index, columns, col_dict
diff --git a/requirements-dev.txt b/requirements-dev.txt
index a3f71ac2a3aa5..0f1d76b996df1 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -12,7 +12,7 @@ flake8==4.0.1
flake8-bugbear==21.3.2
flake8-comprehensions==3.7.0
isort>=5.2.1
-mypy==0.941
+mypy==0.950
pre-commit>=2.9.2
pycodestyle
pyupgrade
| Mypy aims to have a monthly release. | https://api.github.com/repos/pandas-dev/pandas/pulls/46905 | 2022-04-30T13:08:54Z | 2022-05-15T15:33:17Z | 2022-05-15T15:33:17Z | 2022-05-30T07:02:51Z |
BUG: DatetimeIndex.resolution with nanosecond reso | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 128fd68674f96..4e2f547d7d2dc 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -553,6 +553,8 @@ Other Deprecations
- Deprecated the ``closed`` argument in :meth:`interval_range` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`)
- Deprecated the methods :meth:`DataFrame.mad`, :meth:`Series.mad`, and the corresponding groupby methods (:issue:`11787`)
- Deprecated positional arguments to :meth:`Index.join` except for ``other``, use keyword-only arguments instead of positional arguments (:issue:`46518`)
+- Deprecated indexing on a timezone-naive :class:`DatetimeIndex` using a string representing a timezone-aware datetime (:issue:`46903`, :issue:`36148`)
+-
.. ---------------------------------------------------------------------------
.. _whatsnew_150.performance:
@@ -594,6 +596,7 @@ Datetimelike
- Bug in :meth:`Index.astype` when casting from object dtype to ``timedelta64[ns]`` dtype incorrectly casting ``np.datetime64("NaT")`` values to ``np.timedelta64("NaT")`` instead of raising (:issue:`45722`)
- Bug in :meth:`SeriesGroupBy.value_counts` index when passing categorical column (:issue:`44324`)
- Bug in :meth:`DatetimeIndex.tz_localize` localizing to UTC failing to make a copy of the underlying data (:issue:`46460`)
+- Bug in :meth:`DatetimeIndex.resolution` incorrectly returning "day" instead of "nanosecond" for nanosecond-resolution indexes (:issue:`46903`)
-
Timedelta
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 700f66840f128..9492888e7db77 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -60,6 +60,10 @@ from pandas._libs.tslibs.nattype cimport (
)
from pandas._libs.tslibs.timestamps cimport _Timestamp
+from pandas._libs.tslibs import (
+ Resolution,
+ get_resolution,
+)
from pandas._libs.tslibs.timestamps import Timestamp
# Note: this is the only non-tslibs intra-pandas dependency here
@@ -122,11 +126,11 @@ def format_array_from_datetime(
"""
cdef:
int64_t val, ns, N = len(values)
- ndarray[int64_t] consider_values
bint show_ms = False, show_us = False, show_ns = False
bint basic_format = False
ndarray[object] result = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
- object ts, res
+ _Timestamp ts
+ str res
npy_datetimestruct dts
if na_rep is None:
@@ -136,16 +140,10 @@ def format_array_from_datetime(
# a format based on precision
basic_format = format is None and tz is None
if basic_format:
- consider_values = values[values != NPY_NAT]
- show_ns = (consider_values % 1000).any()
-
- if not show_ns:
- consider_values //= 1000
- show_us = (consider_values % 1000).any()
-
- if not show_ms:
- consider_values //= 1000
- show_ms = (consider_values % 1000).any()
+ reso_obj = get_resolution(values)
+ show_ns = reso_obj == Resolution.RESO_NS
+ show_us = reso_obj == Resolution.RESO_US
+ show_ms = reso_obj == Resolution.RESO_MS
for i in range(N):
val = values[i]
@@ -178,6 +176,7 @@ def format_array_from_datetime(
# invalid format string
# requires dates > 1900
try:
+ # Note: dispatches to pydatetime
result[i] = ts.strftime(format)
except ValueError:
result[i] = str(ts)
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index 31d0579900abd..511ce26feeefa 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -204,7 +204,9 @@ def ints_to_pydatetime(
cdef inline c_Resolution _reso_stamp(npy_datetimestruct *dts):
- if dts.us != 0:
+ if dts.ps != 0:
+ return c_Resolution.RESO_NS
+ elif dts.us != 0:
if dts.us % 1000 == 0:
return c_Resolution.RESO_MS
return c_Resolution.RESO_US
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 5274f68eb3171..806d081c0176b 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -593,7 +593,7 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
end = self._maybe_cast_for_get_loc(end)
return start, end
- def _deprecate_mismatched_indexing(self, key) -> None:
+ def _deprecate_mismatched_indexing(self, key, one_way: bool = False) -> None:
# GH#36148
# we get here with isinstance(key, self._data._recognized_scalars)
try:
@@ -606,6 +606,10 @@ def _deprecate_mismatched_indexing(self, key) -> None:
"raise KeyError in a future version. "
"Use a timezone-naive object instead."
)
+ elif one_way:
+ # we special-case timezone-naive strings and timezone-aware
+ # DatetimeIndex
+ return
else:
msg = (
"Indexing a timezone-aware DatetimeIndex with a "
@@ -640,6 +644,7 @@ def get_loc(self, key, method=None, tolerance=None):
parsed, reso = self._parse_with_reso(key)
except ValueError as err:
raise KeyError(key) from err
+ self._deprecate_mismatched_indexing(parsed, one_way=True)
if self._can_partial_date_slice(reso):
try:
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index 332ab02255911..8d498b59c55d1 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -10,6 +10,22 @@
class TestDatetimeIndex:
+ def test_get_loc_naive_dti_aware_str_deprecated(self):
+ # GH#46903
+ ts = Timestamp("20130101").value
+ dti = pd.DatetimeIndex([ts + 50 + i for i in range(100)])
+ ser = Series(range(100), index=dti)
+
+ key = "2013-01-01 00:00:00.000000050+0000"
+ msg = "Indexing a timezone-naive DatetimeIndex with a timezone-aware datetime"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ res = ser[key]
+ assert res == 0
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ loc = dti.get_loc(key)
+ assert loc == 0
+
def test_indexing_with_datetime_tz(self):
# GH#8260
diff --git a/pandas/tests/series/methods/test_asof.py b/pandas/tests/series/methods/test_asof.py
index 5557322eae42d..4381aa3f34f8d 100644
--- a/pandas/tests/series/methods/test_asof.py
+++ b/pandas/tests/series/methods/test_asof.py
@@ -23,9 +23,12 @@ def test_asof_nanosecond_index_access(self):
first_value = ser.asof(ser.index[0])
+ # GH#46903 previously incorrectly was "day"
+ assert dti.resolution == "nanosecond"
+
# this used to not work bc parsing was done by dateutil that didn't
# handle nanoseconds
- assert first_value == ser["2013-01-01 00:00:00.000000050+0000"]
+ assert first_value == ser["2013-01-01 00:00:00.000000050"]
expected_ts = np.datetime64("2013-01-01 00:00:00.000000050", "ns")
assert first_value == ser[Timestamp(expected_ts)]
diff --git a/pandas/tests/tslibs/test_resolution.py b/pandas/tests/tslibs/test_resolution.py
new file mode 100644
index 0000000000000..15f4a9d032e5c
--- /dev/null
+++ b/pandas/tests/tslibs/test_resolution.py
@@ -0,0 +1,13 @@
+import numpy as np
+
+from pandas._libs.tslibs import (
+ Resolution,
+ get_resolution,
+)
+
+
+def test_get_resolution_nano():
+ # don't return the fallback RESO_DAY
+ arr = np.array([1], dtype=np.int64)
+ res = get_resolution(arr)
+ assert res == Resolution.RESO_NS
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46903 | 2022-04-30T02:14:50Z | 2022-05-19T13:12:04Z | 2022-05-19T13:12:03Z | 2022-05-19T15:12:06Z |
ENH: fields.get_start_end_field support non-nano | diff --git a/asv_bench/benchmarks/tslibs/fields.py b/asv_bench/benchmarks/tslibs/fields.py
index 23ae73811204c..203afcdaa7378 100644
--- a/asv_bench/benchmarks/tslibs/fields.py
+++ b/asv_bench/benchmarks/tslibs/fields.py
@@ -66,9 +66,9 @@ class TimeGetStartEndField:
def setup(self, size, side, period, freqstr, month_kw):
arr = np.random.randint(0, 10, size=size, dtype="i8")
- self.i8data = arr
+ self.dt64data = arr.view("M8[ns]")
self.attrname = f"is_{period}_{side}"
def time_get_start_end_field(self, size, side, period, freqstr, month_kw):
- get_start_end_field(self.i8data, self.attrname, freqstr, month_kw=month_kw)
+ get_start_end_field(self.dt64data, self.attrname, freqstr, month_kw=month_kw)
diff --git a/pandas/_libs/tslibs/fields.pyi b/pandas/_libs/tslibs/fields.pyi
index 571a327b46df8..228f7dbdf5eac 100644
--- a/pandas/_libs/tslibs/fields.pyi
+++ b/pandas/_libs/tslibs/fields.pyi
@@ -12,7 +12,7 @@ def get_date_name_field(
locale: str | None = ...,
) -> npt.NDArray[np.object_]: ...
def get_start_end_field(
- dtindex: npt.NDArray[np.int64], # const int64_t[:]
+ dt64values: npt.NDArray[np.datetime64],
field: str,
freqstr: str | None = ...,
month_kw: int = ...,
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index cc82deec08a28..e8980dc1a7553 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -5,8 +5,10 @@ objects and arrays
from locale import LC_TIME
from _strptime import LocaleTime
+
cimport cython
from cython cimport Py_ssize_t
+
import numpy as np
cimport numpy as cnp
@@ -41,8 +43,11 @@ from pandas._libs.tslibs.ccalendar cimport (
)
from pandas._libs.tslibs.nattype cimport NPY_NAT
from pandas._libs.tslibs.np_datetime cimport (
+ NPY_DATETIMEUNIT,
dt64_to_dtstruct,
+ get_unit_from_dtype,
npy_datetimestruct,
+ pandas_datetime_to_datetimestruct,
pandas_timedeltastruct,
td64_to_tdstruct,
)
@@ -196,22 +201,35 @@ cdef inline bint _is_on_month(int month, int compare_month, int modby) nogil:
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_start_end_field(const int64_t[:] dtindex, str field,
+def get_start_end_field(ndarray dt64values, str field,
str freqstr=None, int month_kw=12):
"""
Given an int64-based datetime index return array of indicators
of whether timestamps are at the start/end of the month/quarter/year
(defined by frequency).
+
+ Parameters
+ ----------
+ dt64values : ndarray[datetime64], any resolution
+ field : str
+ frestr : str or None, default None
+ month_kw : int, default 12
+
+ Returns
+ -------
+ ndarray[bool]
"""
cdef:
Py_ssize_t i
- int count = len(dtindex)
+ int count = dt64values.size
bint is_business = 0
int end_month = 12
int start_month = 1
ndarray[int8_t] out
npy_datetimestruct dts
int compare_month, modby
+ ndarray dtindex = dt64values.view("i8")
+ NPY_DATETIMEUNIT reso = get_unit_from_dtype(dt64values.dtype)
out = np.zeros(count, dtype='int8')
@@ -251,7 +269,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
out[i] = 0
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_firstbday(dts.year, dts.month)):
@@ -263,7 +281,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
out[i] = 0
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
if _is_on_month(dts.month, compare_month, modby) and dts.day == 1:
out[i] = 1
@@ -275,7 +293,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
out[i] = 0
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_lastbday(dts.year, dts.month)):
@@ -287,7 +305,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
out[i] = 0
continue
- dt64_to_dtstruct(dtindex[i], &dts)
+ pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts)
if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_days_in_month(dts.year, dts.month)):
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index db951027e5794..e7ac855d6a832 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -487,9 +487,7 @@ cdef class _Timestamp(ABCTimestamp):
dict kwds
ndarray[uint8_t, cast=True] out
int month_kw
-
- if self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
+ str unit
if freq:
kwds = freq.kwds
@@ -500,7 +498,9 @@ cdef class _Timestamp(ABCTimestamp):
freqstr = None
val = self._maybe_convert_value_to_local()
- out = get_start_end_field(np.array([val], dtype=np.int64),
+
+ unit = npy_unit_to_abbrev(self._reso)
+ out = get_start_end_field(np.array([val], dtype=f"M8[{unit}]"),
field, freqstr, month_kw)
return out[0]
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 5c8c6d7fe23a3..1e409dc17a06d 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -130,7 +130,7 @@ def f(self):
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
- values, field, self.freqstr, month_kw
+ values.view(self._ndarray.dtype), field, self.freqstr, month_kw
)
else:
result = fields.get_date_field(values, field)
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index ab7bc4c7cb412..bc9e6c0131646 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -732,6 +732,27 @@ def test_non_nano_fields(self, dt64, ts):
assert ts.weekday() == alt.weekday()
assert ts.isoweekday() == alt.isoweekday()
+ def test_start_end_fields(self, ts):
+ assert ts.is_year_start
+ assert ts.is_quarter_start
+ assert ts.is_month_start
+ assert not ts.is_year_end
+ assert not ts.is_month_end
+ assert not ts.is_month_end
+
+ freq = offsets.BDay()
+ ts._set_freq(freq)
+
+ # 2016-01-01 is a Friday, so is year/quarter/month start with this freq
+ msg = "Timestamp.freq is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ assert ts.is_year_start
+ assert ts.is_quarter_start
+ assert ts.is_month_start
+ assert not ts.is_year_end
+ assert not ts.is_month_end
+ assert not ts.is_month_end
+
def test_repr(self, dt64, ts):
alt = Timestamp(dt64)
diff --git a/pandas/tests/tslibs/test_fields.py b/pandas/tests/tslibs/test_fields.py
index 9e6464f7727bd..528d08d7f499b 100644
--- a/pandas/tests/tslibs/test_fields.py
+++ b/pandas/tests/tslibs/test_fields.py
@@ -28,7 +28,10 @@ def test_get_date_field_readonly(dtindex):
def test_get_start_end_field_readonly(dtindex):
- result = fields.get_start_end_field(dtindex, "is_month_start", None)
+ dt64values = dtindex.view("M8[ns]")
+ dt64values.flags.writeable = False
+
+ result = fields.get_start_end_field(dt64values, "is_month_start", None)
expected = np.array([True, False, False, False, False], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
diff --git a/setup.py b/setup.py
index 67b91c55dd397..bca919a3aa6f8 100755
--- a/setup.py
+++ b/setup.py
@@ -506,6 +506,7 @@ def srcpath(name=None, suffix=".pyx", subdir="src"):
"_libs.tslibs.fields": {
"pyxfile": "_libs/tslibs/fields",
"depends": tseries_depends,
+ "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
},
"_libs.tslibs.nattype": {"pyxfile": "_libs/tslibs/nattype"},
"_libs.tslibs.np_datetime": {
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46902 | 2022-04-30T01:36:32Z | 2022-05-05T22:08:23Z | 2022-05-05T22:08:23Z | 2022-05-05T22:14:20Z |
ENH: allow non-nano in DatetimeArray, TimedeltaArray._simple_new | diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
index 7cbc1833093ba..b3a006141fadc 100644
--- a/pandas/_libs/tslibs/__init__.py
+++ b/pandas/_libs/tslibs/__init__.py
@@ -25,6 +25,7 @@
"Tick",
"BaseOffset",
"tz_compare",
+ "is_unitless",
]
from pandas._libs.tslibs import dtypes
@@ -39,6 +40,7 @@
from pandas._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
+ is_unitless,
)
from pandas._libs.tslibs.offsets import (
BaseOffset,
diff --git a/pandas/_libs/tslibs/np_datetime.pyi b/pandas/_libs/tslibs/np_datetime.pyi
index 59f4427125266..27871a78f8aaf 100644
--- a/pandas/_libs/tslibs/np_datetime.pyi
+++ b/pandas/_libs/tslibs/np_datetime.pyi
@@ -9,3 +9,4 @@ def py_td64_to_tdstruct(td64: int, unit: int) -> dict: ...
def astype_overflowsafe(
arr: np.ndarray, dtype: np.dtype, copy: bool = ...
) -> np.ndarray: ...
+def is_unitless(dtype: np.dtype) -> bool: ...
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 07d198193464f..a787bded2bd50 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -92,6 +92,18 @@ def py_get_unit_from_dtype(dtype):
return get_unit_from_dtype(dtype)
+def is_unitless(dtype: cnp.dtype) -> bool:
+ """
+ Check if a datetime64 or timedelta64 dtype has no attached unit.
+ """
+ if dtype.type_num not in [cnp.NPY_DATETIME, cnp.NPY_TIMEDELTA]:
+ raise ValueError("is_unitless dtype must be datetime64 or timedelta64")
+ cdef:
+ NPY_DATETIMEUNIT unit = get_unit_from_dtype(dtype)
+
+ return unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC
+
+
# ----------------------------------------------------------------------
# Comparison
diff --git a/pandas/_libs/tslibs/timedeltas.pyi b/pandas/_libs/tslibs/timedeltas.pyi
index a04104915cf1f..c547503bae273 100644
--- a/pandas/_libs/tslibs/timedeltas.pyi
+++ b/pandas/_libs/tslibs/timedeltas.pyi
@@ -88,6 +88,8 @@ class Timedelta(timedelta):
# GH 46171
# While Timedelta can return pd.NaT, having the constructor return
# a Union with NaTType makes things awkward for users of pandas
+ @classmethod
+ def _from_value_and_reso(cls, value: np.int64, reso: int) -> Timedelta: ...
@property
def days(self) -> int: ...
@property
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index e7f97413f6881..7fef934a85626 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -31,6 +31,7 @@
iNaT,
ints_to_pydatetime,
is_date_array_normalized,
+ is_unitless,
normalize_i8_timestamps,
timezones,
to_offset,
@@ -335,7 +336,12 @@ def _simple_new( # type: ignore[override]
cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE
) -> DatetimeArray:
assert isinstance(values, np.ndarray)
- assert values.dtype == DT64NS_DTYPE
+ assert dtype.kind == "M"
+ if isinstance(dtype, np.dtype):
+ # TODO: once non-nano DatetimeTZDtype is implemented, require that
+ # dtype's reso match values's reso
+ assert dtype == values.dtype
+ assert not is_unitless(dtype)
result = super()._simple_new(values, dtype)
result._freq = freq
@@ -761,7 +767,7 @@ def _add_offset(self, offset) -> DatetimeArray:
else:
values = self
result = offset._apply_array(values).view("M8[ns]")
- result = DatetimeArray._simple_new(result)
+ result = DatetimeArray._simple_new(result, dtype=result.dtype)
result = result.tz_localize(self.tz)
except NotImplementedError:
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 1f55842050df0..816f07b076ef8 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -29,6 +29,7 @@
precision_from_unit,
)
from pandas._libs.tslibs.fields import get_timedelta_field
+from pandas._libs.tslibs.np_datetime import py_get_unit_from_dtype
from pandas._libs.tslibs.timedeltas import (
array_to_timedelta64,
ints_to_pytimedelta,
@@ -40,6 +41,7 @@
npt,
)
from pandas.compat.numpy import function as nv
+from pandas.util._decorators import cache_readonly
from pandas.util._validators import validate_endpoints
from pandas.core.dtypes.astype import astype_td64_unit_conversion
@@ -154,8 +156,15 @@ class TimedeltaArray(dtl.TimelikeOps):
# Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)
# operates pointwise.
+ @cache_readonly
+ def _reso(self):
+ return py_get_unit_from_dtype(self.dtype)
+
def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType:
- return Timedelta(x, unit="ns")
+ y = x.view("i8")
+ if y == NaT.value:
+ return NaT
+ return Timedelta._from_value_and_reso(y, reso=self._reso)
@property
# error: Return type "dtype" of "dtype" incompatible with return type
@@ -174,7 +183,7 @@ def dtype(self) -> np.dtype: # type: ignore[override]
-------
numpy.dtype
"""
- return TD64NS_DTYPE
+ return self._ndarray.dtype
# ----------------------------------------------------------------
# Constructors
@@ -244,11 +253,13 @@ def __init__(
def _simple_new( # type: ignore[override]
cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=TD64NS_DTYPE
) -> TimedeltaArray:
- assert dtype == TD64NS_DTYPE, dtype
+ # Require td64 dtype, not unit-less, matching values.dtype
+ assert isinstance(dtype, np.dtype) and dtype.kind == "m"
+ assert not tslibs.is_unitless(dtype)
assert isinstance(values, np.ndarray), type(values)
- assert values.dtype == TD64NS_DTYPE
+ assert dtype == values.dtype
- result = super()._simple_new(values=values, dtype=TD64NS_DTYPE)
+ result = super()._simple_new(values=values, dtype=dtype)
result._freq = freq
return result
@@ -262,7 +273,7 @@ def _from_sequence(
data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=None)
freq, _ = dtl.validate_inferred_freq(None, inferred_freq, False)
- return cls._simple_new(data, freq=freq)
+ return cls._simple_new(data, dtype=data.dtype, freq=freq)
@classmethod
def _from_sequence_not_strict(
@@ -286,7 +297,7 @@ def _from_sequence_not_strict(
if explicit_none:
freq = None
- result = cls._simple_new(data, freq=freq)
+ result = cls._simple_new(data, dtype=data.dtype, freq=freq)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
@@ -330,7 +341,8 @@ def _generate_range(cls, start, end, periods, freq, closed=None):
if not right_closed:
index = index[:-1]
- return cls._simple_new(index.view("m8[ns]"), freq=freq)
+ td64values = index.view("m8[ns]")
+ return cls._simple_new(td64values, dtype=td64values.dtype, freq=freq)
# ----------------------------------------------------------------
# DatetimeLike Interface
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 9ea87be2a5468..8eb5cc2dd82f6 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -11,6 +11,17 @@
from pandas.core.arrays import DatetimeArray
+class TestNonNano:
+ @pytest.mark.parametrize("unit,reso", [("s", 7), ("ms", 8), ("us", 9)])
+ @pytest.mark.xfail(reason="_box_func is not yet patched to get reso right")
+ def test_non_nano(self, unit, reso):
+ arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]")
+ dta = DatetimeArray._simple_new(arr, dtype=arr.dtype)
+
+ assert dta.dtype == arr.dtype
+ assert dta[0]._reso == reso
+
+
class TestDatetimeArrayComparisons:
# TODO: merge this into tests/arithmetic/test_datetime64 once it is
# sufficiently robust
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index bf3491496ab3a..46306167878f6 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -7,6 +7,16 @@
from pandas.core.arrays import TimedeltaArray
+class TestNonNano:
+ @pytest.mark.parametrize("unit,reso", [("s", 7), ("ms", 8), ("us", 9)])
+ def test_non_nano(self, unit, reso):
+ arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")
+ tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype)
+
+ assert tda.dtype == arr.dtype
+ assert tda[0]._reso == reso
+
+
class TestTimedeltaArray:
@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
def test_astype_int(self, dtype):
diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py
index 9655bb88c2fcf..273a7985ff50b 100644
--- a/pandas/tests/tslibs/test_api.py
+++ b/pandas/tests/tslibs/test_api.py
@@ -50,6 +50,7 @@ def test_namespace():
"tz_convert_from_utc_single",
"to_offset",
"tz_compare",
+ "is_unitless",
]
expected = set(submodules + api)
diff --git a/pandas/tests/tslibs/test_np_datetime.py b/pandas/tests/tslibs/test_np_datetime.py
index 336c7d30d5f77..31f48b9ad7c4a 100644
--- a/pandas/tests/tslibs/test_np_datetime.py
+++ b/pandas/tests/tslibs/test_np_datetime.py
@@ -4,6 +4,7 @@
from pandas._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
astype_overflowsafe,
+ is_unitless,
py_get_unit_from_dtype,
py_td64_to_tdstruct,
)
@@ -11,6 +12,28 @@
import pandas._testing as tm
+def test_is_unitless():
+ dtype = np.dtype("M8[ns]")
+ assert not is_unitless(dtype)
+
+ dtype = np.dtype("datetime64")
+ assert is_unitless(dtype)
+
+ dtype = np.dtype("m8[ns]")
+ assert not is_unitless(dtype)
+
+ dtype = np.dtype("timedelta64")
+ assert is_unitless(dtype)
+
+ msg = "dtype must be datetime64 or timedelta64"
+ with pytest.raises(ValueError, match=msg):
+ is_unitless(np.dtype(np.int64))
+
+ msg = "Argument 'dtype' has incorrect type"
+ with pytest.raises(TypeError, match=msg):
+ is_unitless("foo")
+
+
def test_get_unit_from_dtype():
# datetime64
assert py_get_unit_from_dtype(np.dtype("M8[Y]")) == 0
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46901 | 2022-04-30T01:26:17Z | 2022-05-06T21:20:03Z | 2022-05-06T21:20:03Z | 2022-05-06T22:26:38Z |
Update Git.io deprecated link (#46888) | diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index 2f65ce17f93b2..1904ce32f3170 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -435,7 +435,7 @@ def _str_rstrip(self, to_strip=None):
def _str_removeprefix(self, prefix: str) -> Series:
# outstanding question on whether to use native methods for users
- # on Python 3.9+ https://git.io/JE9QK, in which case we could do
+ # on Python 3.9+ https://bit.ly/3LuMeRn, in which case we could do
# return self._str_map(str.removeprefix)
def removeprefix(text: str) -> str:
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46899 | 2022-04-29T23:56:21Z | 2022-04-30T00:48:13Z | 2022-04-30T00:48:13Z | 2022-04-30T15:41:44Z |
REF: Use Localizer more | diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index c876cc55be0be..e5217259a3648 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -52,12 +52,8 @@ from pandas._libs.tslibs.np_datetime import (
)
from pandas._libs.tslibs.timezones cimport (
- get_dst_info,
get_utcoffset,
- is_fixed_offset,
- is_tzlocal,
is_utc,
- is_zoneinfo,
maybe_get_tz,
tz_compare,
utc_pytz as UTC,
@@ -77,10 +73,7 @@ from pandas._libs.tslibs.nattype cimport (
checknull_with_nat,
)
from pandas._libs.tslibs.tzconversion cimport (
- bisect_right_i8,
- infer_dateutil_fold,
- localize_tzinfo_api,
- tz_convert_from_utc_single,
+ Localizer,
tz_localize_to_utc_single,
)
@@ -518,9 +511,7 @@ cdef _TSObject _create_tsobject_tz_using_offset(npy_datetimestruct dts,
_TSObject obj = _TSObject()
int64_t value # numpy dt64
datetime dt
- ndarray[int64_t] trans
- int64_t* tdata
- int64_t[::1] deltas
+ Py_ssize_t pos
value = dtstruct_to_dt64(&dts)
obj.dts = dts
@@ -530,19 +521,18 @@ cdef _TSObject _create_tsobject_tz_using_offset(npy_datetimestruct dts,
check_overflows(obj)
return obj
+ cdef:
+ Localizer info = Localizer(tz)
+
# Infer fold from offset-adjusted obj.value
# see PEP 495 https://www.python.org/dev/peps/pep-0495/#the-fold-attribute
- if is_utc(tz):
+ if info.use_utc:
pass
- elif is_tzlocal(tz) or is_zoneinfo(tz):
- localize_tzinfo_api(obj.value, tz, &obj.fold)
- else:
- trans, deltas, typ = get_dst_info(tz)
-
- if typ == 'dateutil':
- tdata = <int64_t*>cnp.PyArray_DATA(trans)
- pos = bisect_right_i8(tdata, obj.value, trans.shape[0]) - 1
- obj.fold = infer_dateutil_fold(obj.value, trans, deltas, pos)
+ elif info.use_tzlocal:
+ info.utc_val_to_local_val(obj.value, &pos, &obj.fold)
+ elif info.use_dst and not info.use_pytz:
+ # i.e. dateutil
+ info.utc_val_to_local_val(obj.value, &pos, &obj.fold)
# Keep the converter same as PyDateTime's
dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day,
@@ -700,18 +690,19 @@ cdef inline void _localize_tso(_TSObject obj, tzinfo tz):
cdef:
int64_t local_val
Py_ssize_t outpos = -1
+ Localizer info = Localizer(tz)
assert obj.tzinfo is None
- if is_utc(tz):
+ if info.use_utc:
pass
elif obj.value == NPY_NAT:
pass
else:
- local_val = tz_convert_from_utc_single(obj.value, tz, &obj.fold, &outpos)
+ local_val = info.utc_val_to_local_val(obj.value, &outpos, &obj.fold)
- if outpos != -1:
- # infer we went through a pytz path
+ if info.use_pytz:
+ # infer we went through a pytz path, will have outpos!=-1
tz = tz._tzinfos[tz._transition_info[outpos]]
dt64_to_dtstruct(local_val, &obj.dts)
diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd
index ce7541fe1e74e..a34161b20e2ff 100644
--- a/pandas/_libs/tslibs/tzconversion.pxd
+++ b/pandas/_libs/tslibs/tzconversion.pxd
@@ -2,6 +2,7 @@ from cpython.datetime cimport tzinfo
from numpy cimport (
int64_t,
intp_t,
+ ndarray,
)
@@ -21,5 +22,23 @@ cdef bint infer_dateutil_fold(
int64_t value,
const int64_t[::1] trans,
const int64_t[::1] deltas,
- intp_t pos,
+ Py_ssize_t pos,
)
+
+
+cdef class Localizer:
+ cdef:
+ tzinfo tz
+ bint use_utc, use_fixed, use_tzlocal, use_dst, use_pytz
+ ndarray trans
+ Py_ssize_t ntrans
+ const int64_t[::1] deltas
+ int64_t delta
+ int64_t* tdata
+
+ cdef inline int64_t utc_val_to_local_val(
+ self,
+ int64_t utc_val,
+ Py_ssize_t* pos,
+ bint* fold=?,
+ ) except? -1
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index c6fe40d082038..8d307e324ba4e 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -46,6 +46,75 @@ from pandas._libs.tslibs.timezones cimport (
)
+cdef const int64_t[::1] _deltas_placeholder = np.array([], dtype=np.int64)
+
+
+@cython.freelist(16)
+#@cython.internal
+@cython.final
+cdef class Localizer:
+ # cdef:
+ # tzinfo tz
+ # bint use_utc, use_fixed, use_tzlocal, use_dst, use_pytz
+ # ndarray trans
+ # Py_ssize_t ntrans
+ # const int64_t[::1] deltas
+ # int64_t delta
+ # int64_t* tdata
+
+ @cython.initializedcheck(False)
+ @cython.boundscheck(False)
+ def __cinit__(self, tzinfo tz):
+ self.tz = tz
+ self.use_utc = self.use_tzlocal = self.use_fixed = False
+ self.use_dst = self.use_pytz = False
+ self.ntrans = -1 # placeholder
+ self.delta = -1 # placeholder
+ self.deltas = _deltas_placeholder
+ self.tdata = NULL
+
+ if is_utc(tz) or tz is None:
+ self.use_utc = True
+
+ elif is_tzlocal(tz) or is_zoneinfo(tz):
+ self.use_tzlocal = True
+
+ else:
+ trans, deltas, typ = get_dst_info(tz)
+ self.trans = trans
+ self.ntrans = self.trans.shape[0]
+ self.deltas = deltas
+
+ if typ != "pytz" and typ != "dateutil":
+ # static/fixed; in this case we know that len(delta) == 1
+ self.use_fixed = True
+ self.delta = self.deltas[0]
+ else:
+ self.use_dst = True
+ if typ == "pytz":
+ self.use_pytz = True
+ self.tdata = <int64_t*>cnp.PyArray_DATA(self.trans)
+
+ @cython.boundscheck(False)
+ cdef inline int64_t utc_val_to_local_val(
+ self, int64_t utc_val, Py_ssize_t* pos, bint* fold=NULL
+ ) except? -1:
+ if self.use_utc:
+ return utc_val
+ elif self.use_tzlocal:
+ return utc_val + localize_tzinfo_api(utc_val, self.tz, fold)
+ elif self.use_fixed:
+ return utc_val + self.delta
+ else:
+ pos[0] = bisect_right_i8(self.tdata, utc_val, self.ntrans) - 1
+ if fold is not NULL:
+ fold[0] = infer_dateutil_fold(
+ utc_val, self.trans, self.deltas, pos[0]
+ )
+
+ return utc_val + self.deltas[pos[0]]
+
+
cdef int64_t tz_localize_to_utc_single(
int64_t val, tzinfo tz, object ambiguous=None, object nonexistent=None,
) except? -1:
@@ -465,44 +534,16 @@ cdef int64_t tz_convert_from_utc_single(
converted: int64
"""
cdef:
- int64_t delta
- int64_t[::1] deltas
- ndarray[int64_t, ndim=1] trans
- int64_t* tdata
- intp_t pos
+ Localizer info = Localizer(tz)
+ Py_ssize_t pos
if utc_val == NPY_NAT:
return utc_val
- if is_utc(tz):
- return utc_val
- elif is_tzlocal(tz) or is_zoneinfo(tz):
- return utc_val + _tz_localize_using_tzinfo_api(utc_val, tz, to_utc=False, fold=fold)
+ if outpos is not NULL and info.use_pytz:
+ return info.utc_val_to_local_val(utc_val, outpos, fold)
else:
- trans, deltas, typ = get_dst_info(tz)
- tdata = <int64_t*>cnp.PyArray_DATA(trans)
-
- if typ == "dateutil":
- pos = bisect_right_i8(tdata, utc_val, trans.shape[0]) - 1
-
- if fold is not NULL:
- fold[0] = infer_dateutil_fold(utc_val, trans, deltas, pos)
- return utc_val + deltas[pos]
-
- elif typ == "pytz":
- pos = bisect_right_i8(tdata, utc_val, trans.shape[0]) - 1
-
- # We need to get 'pos' back to the caller so it can pick the
- # correct "standardized" tzinfo object.
- if outpos is not NULL:
- outpos[0] = pos
- return utc_val + deltas[pos]
-
- else:
- # All other cases have len(deltas) == 1. As of 2018-07-17
- # (and 2022-03-07), all test cases that get here have
- # is_fixed_offset(tz).
- return utc_val + deltas[0]
+ return info.utc_val_to_local_val(utc_val, &pos, fold)
# OSError may be thrown by tzlocal on windows at or close to 1970-01-01
@@ -571,7 +612,7 @@ cdef bint infer_dateutil_fold(
int64_t value,
const int64_t[::1] trans,
const int64_t[::1] deltas,
- intp_t pos,
+ Py_ssize_t pos,
):
"""
Infer _TSObject fold property from value by assuming 0 and then setting
@@ -584,7 +625,7 @@ cdef bint infer_dateutil_fold(
ndarray of offset transition points in nanoseconds since epoch.
deltas : int64_t[:]
array of offsets corresponding to transition points in trans.
- pos : intp_t
+ pos : Py_ssize_t
Position of the last transition point before taking fold into account.
Returns
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index 6b78100705a93..fea357c9da98b 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -32,78 +32,8 @@ from .np_datetime cimport (
from .offsets cimport BaseOffset
from .period cimport get_period_ordinal
from .timestamps cimport create_timestamp_from_ts
-from .timezones cimport (
- get_dst_info,
- is_tzlocal,
- is_utc,
- is_zoneinfo,
-)
-from .tzconversion cimport (
- bisect_right_i8,
- localize_tzinfo_api,
-)
-
-
-cdef const int64_t[::1] _deltas_placeholder = np.array([], dtype=np.int64)
-
-
-@cython.freelist(16)
-@cython.internal
-@cython.final
-cdef class Localizer:
- cdef:
- tzinfo tz
- bint use_utc, use_fixed, use_tzlocal, use_dst, use_pytz
- ndarray trans
- Py_ssize_t ntrans
- const int64_t[::1] deltas
- int64_t delta
- int64_t* tdata
-
- @cython.initializedcheck(False)
- @cython.boundscheck(False)
- def __cinit__(self, tzinfo tz):
- self.tz = tz
- self.use_utc = self.use_tzlocal = self.use_fixed = False
- self.use_dst = self.use_pytz = False
- self.ntrans = -1 # placeholder
- self.delta = -1 # placeholder
- self.deltas = _deltas_placeholder
- self.tdata = NULL
-
- if is_utc(tz) or tz is None:
- self.use_utc = True
-
- elif is_tzlocal(tz) or is_zoneinfo(tz):
- self.use_tzlocal = True
-
- else:
- trans, deltas, typ = get_dst_info(tz)
- self.trans = trans
- self.ntrans = self.trans.shape[0]
- self.deltas = deltas
-
- if typ != "pytz" and typ != "dateutil":
- # static/fixed; in this case we know that len(delta) == 1
- self.use_fixed = True
- self.delta = self.deltas[0]
- else:
- self.use_dst = True
- if typ == "pytz":
- self.use_pytz = True
- self.tdata = <int64_t*>cnp.PyArray_DATA(self.trans)
-
- @cython.boundscheck(False)
- cdef inline int64_t utc_val_to_local_val(self, int64_t utc_val, Py_ssize_t* pos) except? -1:
- if self.use_utc:
- return utc_val
- elif self.use_tzlocal:
- return utc_val + localize_tzinfo_api(utc_val, self.tz)
- elif self.use_fixed:
- return utc_val + self.delta
- else:
- pos[0] = bisect_right_i8(self.tdata, utc_val, self.ntrans) - 1
- return utc_val + self.deltas[pos[0]]
+from .timezones cimport is_utc
+from .tzconversion cimport Localizer
@cython.boundscheck(False)
@@ -140,15 +70,7 @@ def tz_convert_from_utc(const int64_t[:] stamps, tzinfo tz):
result[i] = NPY_NAT
continue
- if info.use_utc:
- local_val = utc_val
- elif info.use_tzlocal:
- local_val = utc_val + localize_tzinfo_api(utc_val, tz)
- elif info.use_fixed:
- local_val = utc_val + info.delta
- else:
- pos = bisect_right_i8(info.tdata, utc_val, info.ntrans) - 1
- local_val = utc_val + info.deltas[pos]
+ local_val = info.utc_val_to_local_val(utc_val, &pos)
result[i] = local_val
| I think this gets us to maximal feasible sharing for utc_val_to_local_val. | https://api.github.com/repos/pandas-dev/pandas/pulls/46898 | 2022-04-29T22:58:27Z | 2022-04-30T03:14:03Z | 2022-04-30T03:14:03Z | 2022-04-30T03:24:42Z |
Styler whatsew | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index c85a087835b80..9f8ab3118751d 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -37,10 +37,17 @@ The protocol consists of two parts:
Styler
^^^^^^
- - New method :meth:`.Styler.to_string` for alternative customisable output methods (:issue:`44502`)
- - Added the ability to render ``border`` and ``border-{side}`` CSS properties in Excel (:issue:`42276`)
- - Added a new method :meth:`.Styler.concat` which allows adding customised footer rows to visualise additional calculations on the data, e.g. totals and counts etc. (:issue:`43875`, :issue:`46186`)
- - :meth:`.Styler.highlight_null` now accepts ``color`` consistently with other builtin methods and deprecates ``null_color`` although this remains backwards compatible (:issue:`45907`)
+The most notable development is the new method :meth:`.Styler.concat` which
+allows adding customised footer rows to visualise additional calculations on the data,
+e.g. totals and counts etc. (:issue:`43875`, :issue:`46186`)
+
+Additionally there is an alternative output method :meth:`.Styler.to_string`,
+which allows using the Styler's formatting methods to create, for example, CSVs (:issue:`44502`).
+
+Minor feature improvements are:
+
+ - Adding the ability to render ``border`` and ``border-{side}`` CSS properties in Excel (:issue:`42276`)
+ - Making keyword arguments consist: :meth:`.Styler.highlight_null` now accepts ``color`` and deprecates ``null_color`` although this remains backwards compatible (:issue:`45907`)
.. _whatsnew_150.enhancements.resample_group_keys:
@@ -127,13 +134,6 @@ Notable bug fixes
These are bug fixes that might have notable behavior changes.
-.. _whatsnew_150.notable_bug_fixes.notable_bug_fix1:
-
-Styler
-^^^^^^
-
-- Fixed bug in :class:`CSSToExcelConverter` leading to ``TypeError`` when border color provided without border style for ``xlsxwriter`` engine (:issue:`42276`)
-
.. _whatsnew_150.notable_bug_fixes.groupby_transform_dropna:
Using ``dropna=True`` with ``groupby`` transforms
@@ -194,13 +194,6 @@ did not have the same index as the input.
df.groupby('a', dropna=True).transform('ffill')
df.groupby('a', dropna=True).transform(lambda x: x)
-.. _whatsnew_150.notable_bug_fixes.visualization:
-
-Styler
-^^^^^^
-
-- Fix showing "None" as ylabel in :meth:`Series.plot` when not setting ylabel (:issue:`46129`)
-
.. _whatsnew_150.notable_bug_fixes.notable_bug_fix2:
notable_bug_fix2
@@ -615,6 +608,7 @@ Plotting
- Bug in :meth:`DataFrame.boxplot` that prevented specifying ``vert=False`` (:issue:`36918`)
- Bug in :meth:`DataFrame.plot.scatter` that prevented specifying ``norm`` (:issue:`45809`)
- The function :meth:`DataFrame.plot.scatter` now accepts ``color`` as an alias for ``c`` and ``size`` as an alias for ``s`` for consistency to other plotting functions (:issue:`44670`)
+- Fix showing "None" as ylabel in :meth:`Series.plot` when not setting ylabel (:issue:`46129`)
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -659,7 +653,7 @@ ExtensionArray
Styler
^^^^^^
- Bug when attempting to apply styling functions to an empty DataFrame subset (:issue:`45313`)
--
+- Bug in :class:`CSSToExcelConverter` leading to ``TypeError`` when border color provided without border style for ``xlsxwriter`` engine (:issue:`42276`)
Metadata
^^^^^^^^
| editing the whatsnew placeholder ahead of 1.5, in case I don't have time later on
| https://api.github.com/repos/pandas-dev/pandas/pulls/46897 | 2022-04-29T19:51:48Z | 2022-04-29T22:42:07Z | 2022-04-29T22:42:07Z | 2022-04-30T14:49:25Z |
PERF: Optimize read_excel nrows | diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
index 3363b43f29b78..a2d989e787e0f 100644
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -86,4 +86,15 @@ def time_read_excel(self, engine):
read_excel(fname, engine=engine)
+class ReadExcelNRows(ReadExcel):
+ def time_read_excel(self, engine):
+ if engine == "xlrd":
+ fname = self.fname_excel_xls
+ elif engine == "odf":
+ fname = self.fname_odf
+ else:
+ fname = self.fname_excel
+ read_excel(fname, engine=engine, nrows=10)
+
+
from ..pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 2efc6c9167a83..3bb9a72d6e2f9 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -573,6 +573,7 @@ Performance improvements
- Performance improvement when setting values in a pyarrow backed string array (:issue:`46400`)
- Performance improvement in :func:`factorize` (:issue:`46109`)
- Performance improvement in :class:`DataFrame` and :class:`Series` constructors for extension dtype scalars (:issue:`45854`)
+- Performance improvement in :func:`read_excel` when ``nrows`` argument provided (:issue:`32727`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.bug_fixes:
diff --git a/pandas/io/common.py b/pandas/io/common.py
index fdee1600c2a32..98964b100966f 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -28,6 +28,7 @@
Generic,
Literal,
Mapping,
+ Sequence,
TypeVar,
cast,
overload,
@@ -56,7 +57,12 @@
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
-from pandas.core.dtypes.common import is_file_like
+from pandas.core.dtypes.common import (
+ is_bool,
+ is_file_like,
+ is_integer,
+ is_list_like,
+)
from pandas.core.shared_docs import _shared_docs
@@ -177,12 +183,32 @@ def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:
def validate_header_arg(header: object) -> None:
- if isinstance(header, bool):
+ if header is None:
+ return
+ if is_integer(header):
+ header = cast(int, header)
+ if header < 0:
+ # GH 27779
+ raise ValueError(
+ "Passing negative integer to header is invalid. "
+ "For no header, use header=None instead"
+ )
+ return
+ if is_list_like(header, allow_sets=False):
+ header = cast(Sequence, header)
+ if not all(map(is_integer, header)):
+ raise ValueError("header must be integer or list of integers")
+ if any(i < 0 for i in header):
+ raise ValueError("cannot specify multi-index header with negative integers")
+ return
+ if is_bool(header):
raise TypeError(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names"
)
+ # GH 16338
+ raise ValueError("header must be integer or list of integers")
@overload
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 030ae9fefda98..d20f347e54d6b 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -2,6 +2,7 @@
import abc
import datetime
+from functools import partial
from io import BytesIO
import os
from textwrap import fill
@@ -70,6 +71,7 @@
pop_header_name,
)
from pandas.io.parsers import TextParser
+from pandas.io.parsers.readers import validate_integer
_read_excel_doc = (
"""
@@ -563,7 +565,7 @@ def get_sheet_by_index(self, index: int):
pass
@abc.abstractmethod
- def get_sheet_data(self, sheet, convert_float: bool):
+ def get_sheet_data(self, sheet, convert_float: bool, rows: int | None = None):
pass
def raise_if_bad_sheet_by_index(self, index: int) -> None:
@@ -577,6 +579,99 @@ def raise_if_bad_sheet_by_name(self, name: str) -> None:
if name not in self.sheet_names:
raise ValueError(f"Worksheet named '{name}' not found")
+ def _check_skiprows_func(
+ self,
+ skiprows: Callable,
+ rows_to_use: int,
+ ) -> int:
+ """
+ Determine how many file rows are required to obtain `nrows` data
+ rows when `skiprows` is a function.
+
+ Parameters
+ ----------
+ skiprows : function
+ The function passed to read_excel by the user.
+ rows_to_use : int
+ The number of rows that will be needed for the header and
+ the data.
+
+ Returns
+ -------
+ int
+ """
+ i = 0
+ rows_used_so_far = 0
+ while rows_used_so_far < rows_to_use:
+ if not skiprows(i):
+ rows_used_so_far += 1
+ i += 1
+ return i
+
+ def _calc_rows(
+ self,
+ header: int | Sequence[int] | None,
+ index_col: int | Sequence[int] | None,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None,
+ nrows: int | None,
+ ) -> int | None:
+ """
+ If nrows specified, find the number of rows needed from the
+ file, otherwise return None.
+
+
+ Parameters
+ ----------
+ header : int, list of int, or None
+ See read_excel docstring.
+ index_col : int, list of int, or None
+ See read_excel docstring.
+ skiprows : list-like, int, callable, or None
+ See read_excel docstring.
+ nrows : int or None
+ See read_excel docstring.
+
+ Returns
+ -------
+ int or None
+ """
+ if nrows is None:
+ return None
+ if header is None:
+ header_rows = 1
+ elif is_integer(header):
+ header = cast(int, header)
+ header_rows = 1 + header
+ else:
+ header = cast(Sequence, header)
+ header_rows = 1 + header[-1]
+ # If there is a MultiIndex header and an index then there is also
+ # a row containing just the index name(s)
+ if is_list_like(header) and index_col is not None:
+ header = cast(Sequence, header)
+ if len(header) > 1:
+ header_rows += 1
+ if skiprows is None:
+ return header_rows + nrows
+ if is_integer(skiprows):
+ skiprows = cast(int, skiprows)
+ return header_rows + nrows + skiprows
+ if is_list_like(skiprows):
+
+ def f(skiprows: Sequence, x: int) -> bool:
+ return x in skiprows
+
+ skiprows = cast(Sequence, skiprows)
+ return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows)
+ if callable(skiprows):
+ return self._check_skiprows_func(
+ skiprows,
+ header_rows + nrows,
+ )
+ # else unexpected skiprows type: read_excel will not optimize
+ # the number of rows read from file
+ return None
+
def parse(
self,
sheet_name: str | int | list[int] | list[str] | None = 0,
@@ -613,6 +708,7 @@ def parse(
)
validate_header_arg(header)
+ validate_integer("nrows", nrows)
ret_dict = False
@@ -643,7 +739,8 @@ def parse(
else: # assume an integer if not a string
sheet = self.get_sheet_by_index(asheetname)
- data = self.get_sheet_data(sheet, convert_float)
+ file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows)
+ data = self.get_sheet_data(sheet, convert_float, file_rows_needed)
if hasattr(sheet, "close"):
# pyxlsb opens two TemporaryFiles
sheet.close()
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 5a7e5b0d8d325..075590f3535fe 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -90,7 +90,7 @@ def get_sheet_by_name(self, name: str):
raise ValueError(f"sheet {name} not found")
def get_sheet_data(
- self, sheet, convert_float: bool
+ self, sheet, convert_float: bool, file_rows_needed: int | None = None
) -> list[list[Scalar | NaTType]]:
"""
Parse an ODF Table into a list of lists
@@ -148,6 +148,8 @@ def get_sheet_data(
empty_rows = 0
for _ in range(row_repeat):
table.append(table_row)
+ if file_rows_needed is not None and len(table) >= file_rows_needed:
+ break
# Make our table square
for row in table:
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index 6d70b3f319f37..8f4201d0befff 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -588,7 +588,9 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar:
return cell.value
- def get_sheet_data(self, sheet, convert_float: bool) -> list[list[Scalar]]:
+ def get_sheet_data(
+ self, sheet, convert_float: bool, file_rows_needed: int | None = None
+ ) -> list[list[Scalar]]:
if self.book.read_only:
sheet.reset_dimensions()
@@ -603,6 +605,8 @@ def get_sheet_data(self, sheet, convert_float: bool) -> list[list[Scalar]]:
if converted_row:
last_row_with_data = row_number
data.append(converted_row)
+ if file_rows_needed is not None and len(data) >= file_rows_needed:
+ break
# Trim trailing empty rows
data = data[: last_row_with_data + 1]
diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py
index 36e2645560078..5d40ccdf2f8f3 100644
--- a/pandas/io/excel/_pyxlsb.py
+++ b/pandas/io/excel/_pyxlsb.py
@@ -79,7 +79,12 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar:
return cell.v
- def get_sheet_data(self, sheet, convert_float: bool) -> list[list[Scalar]]:
+ def get_sheet_data(
+ self,
+ sheet,
+ convert_float: bool,
+ file_rows_needed: int | None = None,
+ ) -> list[list[Scalar]]:
data: list[list[Scalar]] = []
prevous_row_number = -1
# When sparse=True the rows can have different lengths and empty rows are
@@ -94,6 +99,8 @@ def get_sheet_data(self, sheet, convert_float: bool) -> list[list[Scalar]]:
data.extend([[]] * (row_number - prevous_row_number - 1))
data.append(converted_row)
prevous_row_number = row_number
+ if file_rows_needed is not None and len(data) >= file_rows_needed:
+ break
if data:
# extend rows to max_width
max_width = max(len(data_row) for data_row in data)
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index f38a05e7a4e64..0bf3ac6134cf6 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -1,8 +1,13 @@
+from __future__ import annotations
+
from datetime import time
import numpy as np
-from pandas._typing import StorageOptions
+from pandas._typing import (
+ Scalar,
+ StorageOptions,
+)
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc
@@ -56,7 +61,9 @@ def get_sheet_by_index(self, index):
self.raise_if_bad_sheet_by_index(index)
return self.book.sheet_by_index(index)
- def get_sheet_data(self, sheet, convert_float):
+ def get_sheet_data(
+ self, sheet, convert_float: bool, file_rows_needed: int | None = None
+ ) -> list[list[Scalar]]:
from xlrd import (
XL_CELL_BOOLEAN,
XL_CELL_DATE,
@@ -107,7 +114,10 @@ def _parse_cell(cell_contents, cell_typ):
data = []
- for i in range(sheet.nrows):
+ nrows = sheet.nrows
+ if file_rows_needed is not None:
+ nrows = min(nrows, file_rows_needed)
+ for i in range(nrows):
row = [
_parse_cell(value, typ)
for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py
index 2851ea36c8a33..e9c39d5ff1996 100644
--- a/pandas/io/parsers/base_parser.py
+++ b/pandas/io/parsers/base_parser.py
@@ -120,13 +120,7 @@ def __init__(self, kwds) -> None:
# validate header options for mi
self.header = kwds.get("header")
- if isinstance(self.header, (list, tuple, np.ndarray)):
- if not all(map(is_integer, self.header)):
- raise ValueError("header must be integer or list of integers")
- if any(i < 0 for i in self.header):
- raise ValueError(
- "cannot specify multi-index header with negative integers"
- )
+ if is_list_like(self.header, allow_sets=False):
if kwds.get("usecols"):
raise ValueError(
"cannot specify usecols when specifying a multi-index header"
@@ -138,9 +132,8 @@ def __init__(self, kwds) -> None:
# validate index_col that only contains integers
if self.index_col is not None:
- is_sequence = isinstance(self.index_col, (list, tuple, np.ndarray))
if not (
- is_sequence
+ is_list_like(self.index_col, allow_sets=False)
and all(map(is_integer, self.index_col))
or is_integer(self.index_col)
):
@@ -148,21 +141,11 @@ def __init__(self, kwds) -> None:
"index_col must only contain row numbers "
"when specifying a multi-index header"
)
- elif self.header is not None:
+ elif self.header is not None and self.prefix is not None:
# GH 27394
- if self.prefix is not None:
- raise ValueError(
- "Argument prefix must be None if argument header is not None"
- )
- # GH 16338
- elif not is_integer(self.header):
- raise ValueError("header must be integer or list of integers")
- # GH 27779
- elif self.header < 0:
- raise ValueError(
- "Passing negative integer to header is invalid. "
- "For no header, use header=None instead"
- )
+ raise ValueError(
+ "Argument prefix must be None if argument header is not None"
+ )
self._name_processed = False
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index 1e0f74ea41453..c58896e9e1baf 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -1219,6 +1219,42 @@ def test_read_excel_nrows_non_integer_parameter(self, read_ext):
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, nrows="5")
+ @pytest.mark.parametrize(
+ "filename,sheet_name,header,index_col,skiprows",
+ [
+ ("testmultiindex", "mi_column", [0, 1], 0, None),
+ ("testmultiindex", "mi_index", None, [0, 1], None),
+ ("testmultiindex", "both", [0, 1], [0, 1], None),
+ ("testmultiindex", "mi_column_name", [0, 1], 0, None),
+ ("testskiprows", "skiprows_list", None, None, [0, 2]),
+ ("testskiprows", "skiprows_list", None, None, lambda x: x == 0 or x == 2),
+ ],
+ )
+ def test_read_excel_nrows_params(
+ self, read_ext, filename, sheet_name, header, index_col, skiprows
+ ):
+ """
+ For various parameters, we should get the same result whether we
+ limit the rows during load (nrows=3) or after (df.iloc[:3]).
+ """
+ # GH 46894
+ expected = pd.read_excel(
+ filename + read_ext,
+ sheet_name=sheet_name,
+ header=header,
+ index_col=index_col,
+ skiprows=skiprows,
+ ).iloc[:3]
+ actual = pd.read_excel(
+ filename + read_ext,
+ sheet_name=sheet_name,
+ header=header,
+ index_col=index_col,
+ skiprows=skiprows,
+ nrows=3,
+ )
+ tm.assert_frame_equal(actual, expected)
+
def test_read_excel_squeeze(self, read_ext):
# GH 12157
f = "test_squeeze" + read_ext
| - [x] closes #32727
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
I timed read_excel on a file with 10 columns and 1000 rows, and report the best of 10 repeats (ms) below. This change can make a modest improvement on xls and ods files, and a significant improvement on xlsx and xlsb files. When `nrows=None` this has no measurable impact on the run time.
| ext | nrows | time (main) | time (this branch) |
|------|-------|------------|------------|
| xls | None | 22.4 | 22.1 |
| xls | 10 | 21.4 | 17.0 |
| xlsx | None | 99.1 | 99.7 |
| xlsx | 10 | 98.0 | 8.8 |
| xlsb | None | 81.0 | 80.2 |
| xlsb | 10 | 80.2 | 4.8 |
| ods | None | 571 | 569 |
| ods | 10 | 566 | 517 |
Here are the results of ```asv run -e -E existing --bench ReadExcel``` showing similar results (the benchmark spreadsheet is different than the one above).
```
[75.00%] ··· io.excel.ReadExcel.time_read_excel ok
[75.00%] ··· ========== ============
engine
---------- ------------
xlrd 36.5±0.3ms
openpyxl 162±0.1ms
odf 688±5ms
========== ============
[100.00%] ··· io.excel.ReadExcelNRows.time_read_excel ok
[100.00%] ··· ========== ============
engine
---------- ------------
xlrd 24.5±0.1ms
openpyxl 29.0±0.1ms
odf 508±3ms
========== ============
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/46894 | 2022-04-29T05:06:53Z | 2022-06-05T23:49:12Z | 2022-06-05T23:49:12Z | 2022-06-07T03:22:23Z |
REF: handle 2D in tslibs.vectorized | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 8f145d0d66acc..4eb1494c4d56c 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -354,6 +354,7 @@ def array_to_timedelta64(
raise ValueError(
"unit must not be specified if the input contains a str"
)
+ cnp.PyArray_ITER_NEXT(it)
# Usually, we have all strings. If so, we hit the fast path.
# If this path fails, we try conversion a different way, and
diff --git a/pandas/_libs/tslibs/vectorized.pyi b/pandas/_libs/tslibs/vectorized.pyi
index a8f81514c5645..61148605aadea 100644
--- a/pandas/_libs/tslibs/vectorized.pyi
+++ b/pandas/_libs/tslibs/vectorized.pyi
@@ -11,24 +11,24 @@ from pandas._libs.tslibs.offsets import BaseOffset
from pandas._typing import npt
def dt64arr_to_periodarr(
- stamps: npt.NDArray[np.int64], # const int64_t[:]
+ stamps: npt.NDArray[np.int64],
freq: int,
tz: tzinfo | None,
-) -> npt.NDArray[np.int64]: ... # np.ndarray[np.int64, ndim=1]
+) -> npt.NDArray[np.int64]: ...
def is_date_array_normalized(
- stamps: npt.NDArray[np.int64], # const int64_t[:]
+ stamps: npt.NDArray[np.int64],
tz: tzinfo | None = ...,
) -> bool: ...
def normalize_i8_timestamps(
- stamps: npt.NDArray[np.int64], # const int64_t[:]
+ stamps: npt.NDArray[np.int64],
tz: tzinfo | None,
) -> npt.NDArray[np.int64]: ...
def get_resolution(
- stamps: npt.NDArray[np.int64], # const int64_t[:]
+ stamps: npt.NDArray[np.int64],
tz: tzinfo | None = ...,
) -> Resolution: ...
def ints_to_pydatetime(
- arr: npt.NDArray[np.int64], # const int64_t[:}]
+ arr: npt.NDArray[np.int64],
tz: tzinfo | None = ...,
freq: BaseOffset | None = ...,
fold: bool = ...,
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index fea357c9da98b..31d0579900abd 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -38,7 +38,8 @@ from .tzconversion cimport Localizer
@cython.boundscheck(False)
@cython.wraparound(False)
-def tz_convert_from_utc(const int64_t[:] stamps, tzinfo tz):
+def tz_convert_from_utc(ndarray stamps, tzinfo tz):
+ # stamps is int64_t, arbitrary ndim
"""
Convert the values (in i8) from UTC to tz
@@ -54,27 +55,33 @@ def tz_convert_from_utc(const int64_t[:] stamps, tzinfo tz):
cdef:
Localizer info = Localizer(tz)
int64_t utc_val, local_val
- Py_ssize_t pos, i, n = stamps.shape[0]
+ Py_ssize_t pos, i, n = stamps.size
- int64_t[::1] result
+ ndarray result
+ cnp.broadcast mi
if tz is None or is_utc(tz) or stamps.size == 0:
# Much faster than going through the "standard" pattern below
- return stamps.base.copy()
+ return stamps.copy()
- result = np.empty(n, dtype=np.int64)
+ result = cnp.PyArray_EMPTY(stamps.ndim, stamps.shape, cnp.NPY_INT64, 0)
+ mi = cnp.PyArray_MultiIterNew2(result, stamps)
for i in range(n):
- utc_val = stamps[i]
+ # Analogous to: utc_val = stamps[i]
+ utc_val = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 1))[0]
+
if utc_val == NPY_NAT:
- result[i] = NPY_NAT
- continue
+ local_val = NPY_NAT
+ else:
+ local_val = info.utc_val_to_local_val(utc_val, &pos)
- local_val = info.utc_val_to_local_val(utc_val, &pos)
+ # Analogous to: result[i] = local_val
+ (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = local_val
- result[i] = local_val
+ cnp.PyArray_MultiIter_NEXT(mi)
- return result.base
+ return result
# -------------------------------------------------------------------------
@@ -83,12 +90,13 @@ def tz_convert_from_utc(const int64_t[:] stamps, tzinfo tz):
@cython.wraparound(False)
@cython.boundscheck(False)
def ints_to_pydatetime(
- const int64_t[:] stamps,
+ ndarray stamps,
tzinfo tz=None,
BaseOffset freq=None,
bint fold=False,
str box="datetime"
) -> np.ndarray:
+ # stamps is int64, arbitrary ndim
"""
Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp.
@@ -119,13 +127,21 @@ def ints_to_pydatetime(
cdef:
Localizer info = Localizer(tz)
int64_t utc_val, local_val
- Py_ssize_t i, n = stamps.shape[0]
+ Py_ssize_t i, n = stamps.size
Py_ssize_t pos = -1 # unused, avoid not-initialized warning
npy_datetimestruct dts
tzinfo new_tz
- ndarray[object] result = np.empty(n, dtype=object)
bint use_date = False, use_time = False, use_ts = False, use_pydt = False
+ object res_val
+
+ # Note that `result` (and thus `result_flat`) is C-order and
+ # `it` iterates C-order as well, so the iteration matches
+ # See discussion at
+ # github.com/pandas-dev/pandas/pull/46886#discussion_r860261305
+ ndarray result = cnp.PyArray_EMPTY(stamps.ndim, stamps.shape, cnp.NPY_OBJECT, 0)
+ object[::1] res_flat = result.ravel() # should NOT be a copy
+ cnp.flatiter it = cnp.PyArray_IterNew(stamps)
if box == "date":
assert (tz is None), "tz should be None when converting to date"
@@ -142,31 +158,44 @@ def ints_to_pydatetime(
)
for i in range(n):
- utc_val = stamps[i]
+ # Analogous to: utc_val = stamps[i]
+ utc_val = (<int64_t*>cnp.PyArray_ITER_DATA(it))[0]
+
new_tz = tz
if utc_val == NPY_NAT:
- result[i] = <object>NaT
- continue
+ res_val = <object>NaT
- local_val = info.utc_val_to_local_val(utc_val, &pos)
- if info.use_pytz:
- # find right representation of dst etc in pytz timezone
- new_tz = tz._tzinfos[tz._transition_info[pos]]
-
- dt64_to_dtstruct(local_val, &dts)
-
- if use_ts:
- result[i] = create_timestamp_from_ts(utc_val, dts, new_tz, freq, fold)
- elif use_pydt:
- result[i] = datetime(
- dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us,
- new_tz, fold=fold,
- )
- elif use_date:
- result[i] = date(dts.year, dts.month, dts.day)
else:
- result[i] = time(dts.hour, dts.min, dts.sec, dts.us, new_tz, fold=fold)
+
+ local_val = info.utc_val_to_local_val(utc_val, &pos)
+ if info.use_pytz:
+ # find right representation of dst etc in pytz timezone
+ new_tz = tz._tzinfos[tz._transition_info[pos]]
+
+ dt64_to_dtstruct(local_val, &dts)
+
+ if use_ts:
+ res_val = create_timestamp_from_ts(utc_val, dts, new_tz, freq, fold)
+ elif use_pydt:
+ res_val = datetime(
+ dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us,
+ new_tz, fold=fold,
+ )
+ elif use_date:
+ res_val = date(dts.year, dts.month, dts.day)
+ else:
+ res_val = time(dts.hour, dts.min, dts.sec, dts.us, new_tz, fold=fold)
+
+ # Note: we can index result directly instead of using PyArray_MultiIter_DATA
+ # like we do for the other functions because result is known C-contiguous
+ # and is the first argument to PyArray_MultiIterNew2. The usual pattern
+ # does not seem to work with object dtype.
+ # See discussion at
+ # github.com/pandas-dev/pandas/pull/46886#discussion_r860261305
+ res_flat[i] = res_val
+
+ cnp.PyArray_ITER_NEXT(it)
return result
@@ -190,27 +219,33 @@ cdef inline c_Resolution _reso_stamp(npy_datetimestruct *dts):
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_resolution(const int64_t[:] stamps, tzinfo tz=None) -> Resolution:
+def get_resolution(ndarray stamps, tzinfo tz=None) -> Resolution:
+ # stamps is int64_t, any ndim
cdef:
Localizer info = Localizer(tz)
int64_t utc_val, local_val
- Py_ssize_t i, n = stamps.shape[0]
+ Py_ssize_t i, n = stamps.size
Py_ssize_t pos = -1 # unused, avoid not-initialized warning
+ cnp.flatiter it = cnp.PyArray_IterNew(stamps)
npy_datetimestruct dts
c_Resolution reso = c_Resolution.RESO_DAY, curr_reso
for i in range(n):
- utc_val = stamps[i]
+ # Analogous to: utc_val = stamps[i]
+ utc_val = cnp.PyArray_GETITEM(stamps, cnp.PyArray_ITER_DATA(it))
+
if utc_val == NPY_NAT:
- continue
+ pass
+ else:
+ local_val = info.utc_val_to_local_val(utc_val, &pos)
- local_val = info.utc_val_to_local_val(utc_val, &pos)
+ dt64_to_dtstruct(local_val, &dts)
+ curr_reso = _reso_stamp(&dts)
+ if curr_reso < reso:
+ reso = curr_reso
- dt64_to_dtstruct(local_val, &dts)
- curr_reso = _reso_stamp(&dts)
- if curr_reso < reso:
- reso = curr_reso
+ cnp.PyArray_ITER_NEXT(it)
return Resolution(reso)
@@ -221,7 +256,8 @@ def get_resolution(const int64_t[:] stamps, tzinfo tz=None) -> Resolution:
@cython.cdivision(False)
@cython.wraparound(False)
@cython.boundscheck(False)
-cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo tz):
+cpdef ndarray normalize_i8_timestamps(ndarray stamps, tzinfo tz):
+ # stamps is int64_t, arbitrary ndim
"""
Normalize each of the (nanosecond) timezone aware timestamps in the given
array by rounding down to the beginning of the day (i.e. midnight).
@@ -238,28 +274,35 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t
"""
cdef:
Localizer info = Localizer(tz)
- int64_t utc_val, local_val
- Py_ssize_t i, n = stamps.shape[0]
+ int64_t utc_val, local_val, res_val
+ Py_ssize_t i, n = stamps.size
Py_ssize_t pos = -1 # unused, avoid not-initialized warning
- int64_t[::1] result = np.empty(n, dtype=np.int64)
+ ndarray result = cnp.PyArray_EMPTY(stamps.ndim, stamps.shape, cnp.NPY_INT64, 0)
+ cnp.broadcast mi = cnp.PyArray_MultiIterNew2(result, stamps)
for i in range(n):
- utc_val = stamps[i]
+ # Analogous to: utc_val = stamps[i]
+ utc_val = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 1))[0]
+
if utc_val == NPY_NAT:
- result[i] = NPY_NAT
- continue
+ res_val = NPY_NAT
+ else:
+ local_val = info.utc_val_to_local_val(utc_val, &pos)
+ res_val = local_val - (local_val % DAY_NANOS)
- local_val = info.utc_val_to_local_val(utc_val, &pos)
+ # Analogous to: result[i] = res_val
+ (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = res_val
- result[i] = local_val - (local_val % DAY_NANOS)
+ cnp.PyArray_MultiIter_NEXT(mi)
- return result.base # `.base` to access underlying ndarray
+ return result
@cython.wraparound(False)
@cython.boundscheck(False)
-def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None) -> bool:
+def is_date_array_normalized(ndarray stamps, tzinfo tz=None) -> bool:
+ # stamps is int64_t, arbitrary ndim
"""
Check if all of the given (nanosecond) timestamps are normalized to
midnight, i.e. hour == minute == second == 0. If the optional timezone
@@ -277,16 +320,21 @@ def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None) -> bool:
cdef:
Localizer info = Localizer(tz)
int64_t utc_val, local_val
- Py_ssize_t i, n = stamps.shape[0]
+ Py_ssize_t i, n = stamps.size
Py_ssize_t pos = -1 # unused, avoid not-initialized warning
+ cnp.flatiter it = cnp.PyArray_IterNew(stamps)
for i in range(n):
- utc_val = stamps[i]
+ # Analogous to: utc_val = stamps[i]
+ utc_val = cnp.PyArray_GETITEM(stamps, cnp.PyArray_ITER_DATA(it))
+
local_val = info.utc_val_to_local_val(utc_val, &pos)
if local_val % DAY_NANOS != 0:
return False
+ cnp.PyArray_ITER_NEXT(it)
+
return True
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46886 | 2022-04-27T21:31:15Z | 2022-05-06T21:30:54Z | 2022-05-06T21:30:54Z | 2022-05-06T22:25:47Z |
DOC: GH27557 Updates Python support documentation to match NumPy NEP 29 | diff --git a/doc/source/development/policies.rst b/doc/source/development/policies.rst
index f8e6bda2085d8..d75262c08dfd6 100644
--- a/doc/source/development/policies.rst
+++ b/doc/source/development/policies.rst
@@ -51,7 +51,7 @@ pandas may change the behavior of experimental features at any time.
Python support
~~~~~~~~~~~~~~
-pandas will only drop support for specific Python versions (e.g. 3.6.x, 3.7.x) in
-pandas **major** or **minor** releases.
+pandas mirrors the `NumPy guidelines for Python support <https://numpy.org/neps/nep-0029-deprecation_policy.html#implementation>`__.
+
.. _SemVer: https://semver.org
| - [x] closes [#27557](https://github.com/pandas-dev/pandas/issues/27557)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This updates the documentation for Python support to match NEP 29, and includes a link to NEP 29. The change is at line 410 of docs/development/policies.html. | https://api.github.com/repos/pandas-dev/pandas/pulls/46883 | 2022-04-27T07:02:56Z | 2022-04-28T13:37:33Z | 2022-04-28T13:37:33Z | 2022-04-28T13:37:34Z |
Backport PR #45247 on branch 1.4.x (PERF: find_stack_level) | diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py
index 806e2abe83a92..ef467f096e963 100644
--- a/pandas/util/_exceptions.py
+++ b/pandas/util/_exceptions.py
@@ -29,17 +29,20 @@ def find_stack_level() -> int:
Find the first place in the stack that is not inside pandas
(tests notwithstanding).
"""
- stack = inspect.stack()
import pandas as pd
pkg_dir = os.path.dirname(pd.__file__)
test_dir = os.path.join(pkg_dir, "tests")
- for n in range(len(stack)):
- fname = stack[n].filename
+ # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
+ frame = inspect.currentframe()
+ n = 0
+ while frame:
+ fname = inspect.getfile(frame)
if fname.startswith(pkg_dir) and not fname.startswith(test_dir):
- continue
+ frame = frame.f_back
+ n += 1
else:
break
return n
| Backport PR #45247: PERF: find_stack_level | https://api.github.com/repos/pandas-dev/pandas/pulls/46881 | 2022-04-27T03:45:59Z | 2022-04-27T12:31:44Z | 2022-04-27T12:31:44Z | 2022-04-27T12:59:44Z |
REF: libhashtable.mode support mask | diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi
index 481ff0d36c460..5c7be5e660fd9 100644
--- a/pandas/_libs/hashtable.pyi
+++ b/pandas/_libs/hashtable.pyi
@@ -197,10 +197,13 @@ def duplicated(
values: np.ndarray,
keep: Literal["last", "first", False] = ...,
) -> npt.NDArray[np.bool_]: ...
-def mode(values: np.ndarray, dropna: bool) -> np.ndarray: ...
+def mode(
+ values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None
+) -> np.ndarray: ...
def value_count(
values: np.ndarray,
dropna: bool,
+ mask: npt.NDArray[np.bool_] | None = None,
) -> tuple[np.ndarray, npt.NDArray[np.int64],]: ... # np.ndarray[same-as-values]
# arr and values should have same dtype
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index 11a45bb194c03..f7c41b32864be 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -31,9 +31,9 @@ dtypes = [('Complex128', 'complex128', 'complex128',
@cython.wraparound(False)
@cython.boundscheck(False)
{{if dtype == 'object'}}
-cdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna):
+cdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna, const uint8_t[:] mask=None):
{{else}}
-cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
+cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna, const uint8_t[:] mask=None):
{{endif}}
cdef:
Py_ssize_t i = 0
@@ -46,6 +46,11 @@ cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
{{c_type}} val
int ret = 0
+ bint uses_mask = mask is not None
+ bint isna_entry = False
+
+ if uses_mask and not dropna:
+ raise NotImplementedError("uses_mask not implemented with dropna=False")
# we track the order in which keys are first seen (GH39009),
# khash-map isn't insertion-ordered, thus:
@@ -56,6 +61,9 @@ cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
table = kh_init_{{ttype}}()
{{if dtype == 'object'}}
+ if uses_mask:
+ raise NotImplementedError("uses_mask not implemented with object dtype")
+
kh_resize_{{ttype}}(table, n // 10)
for i in range(n):
@@ -74,7 +82,13 @@ cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
for i in range(n):
val = {{to_c_type}}(values[i])
- if not is_nan_{{c_type}}(val) or not dropna:
+ if dropna:
+ if uses_mask:
+ isna_entry = mask[i]
+ else:
+ isna_entry = is_nan_{{c_type}}(val)
+
+ if not dropna or not isna_entry:
k = kh_get_{{ttype}}(table, val)
if k != table.n_buckets:
table.vals[k] += 1
@@ -251,37 +265,37 @@ ctypedef fused htfunc_t:
complex64_t
-cpdef value_count(ndarray[htfunc_t] values, bint dropna):
+cpdef value_count(ndarray[htfunc_t] values, bint dropna, const uint8_t[:] mask=None):
if htfunc_t is object:
- return value_count_object(values, dropna)
+ return value_count_object(values, dropna, mask=mask)
elif htfunc_t is int8_t:
- return value_count_int8(values, dropna)
+ return value_count_int8(values, dropna, mask=mask)
elif htfunc_t is int16_t:
- return value_count_int16(values, dropna)
+ return value_count_int16(values, dropna, mask=mask)
elif htfunc_t is int32_t:
- return value_count_int32(values, dropna)
+ return value_count_int32(values, dropna, mask=mask)
elif htfunc_t is int64_t:
- return value_count_int64(values, dropna)
+ return value_count_int64(values, dropna, mask=mask)
elif htfunc_t is uint8_t:
- return value_count_uint8(values, dropna)
+ return value_count_uint8(values, dropna, mask=mask)
elif htfunc_t is uint16_t:
- return value_count_uint16(values, dropna)
+ return value_count_uint16(values, dropna, mask=mask)
elif htfunc_t is uint32_t:
- return value_count_uint32(values, dropna)
+ return value_count_uint32(values, dropna, mask=mask)
elif htfunc_t is uint64_t:
- return value_count_uint64(values, dropna)
+ return value_count_uint64(values, dropna, mask=mask)
elif htfunc_t is float64_t:
- return value_count_float64(values, dropna)
+ return value_count_float64(values, dropna, mask=mask)
elif htfunc_t is float32_t:
- return value_count_float32(values, dropna)
+ return value_count_float32(values, dropna, mask=mask)
elif htfunc_t is complex128_t:
- return value_count_complex128(values, dropna)
+ return value_count_complex128(values, dropna, mask=mask)
elif htfunc_t is complex64_t:
- return value_count_complex64(values, dropna)
+ return value_count_complex64(values, dropna, mask=mask)
else:
raise TypeError(values.dtype)
@@ -361,7 +375,7 @@ cpdef ismember(ndarray[htfunc_t] arr, ndarray[htfunc_t] values):
@cython.wraparound(False)
@cython.boundscheck(False)
-def mode(ndarray[htfunc_t] values, bint dropna):
+def mode(ndarray[htfunc_t] values, bint dropna, const uint8_t[:] mask=None):
# TODO(cython3): use const htfunct_t[:]
cdef:
@@ -372,7 +386,7 @@ def mode(ndarray[htfunc_t] values, bint dropna):
int64_t count, max_count = -1
Py_ssize_t nkeys, k, j = 0
- keys, counts = value_count(values, dropna)
+ keys, counts = value_count(values, dropna, mask=mask)
nkeys = len(keys)
modes = np.empty(nkeys, dtype=values.dtype)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 0c0b93f41c657..112c401500472 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -858,12 +858,15 @@ def value_counts(
# Called once from SparseArray, otherwise could be private
-def value_counts_arraylike(values: np.ndarray, dropna: bool):
+def value_counts_arraylike(
+ values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None
+):
"""
Parameters
----------
values : np.ndarray
dropna : bool
+ mask : np.ndarray[bool] or None, default None
Returns
-------
@@ -873,7 +876,7 @@ def value_counts_arraylike(values: np.ndarray, dropna: bool):
original = values
values = _ensure_data(values)
- keys, counts = htable.value_count(values, dropna)
+ keys, counts = htable.value_count(values, dropna, mask=mask)
if needs_i8_conversion(original.dtype):
# datetime, timedelta, or period
@@ -911,7 +914,9 @@ def duplicated(
return htable.duplicated(values, keep=keep)
-def mode(values: ArrayLike, dropna: bool = True) -> ArrayLike:
+def mode(
+ values: ArrayLike, dropna: bool = True, mask: npt.NDArray[np.bool_] | None = None
+) -> ArrayLike:
"""
Returns the mode(s) of an array.
@@ -937,7 +942,7 @@ def mode(values: ArrayLike, dropna: bool = True) -> ArrayLike:
values = _ensure_data(values)
- npresult = htable.mode(values, dropna=dropna)
+ npresult = htable.mode(values, dropna=dropna, mask=mask)
try:
npresult = np.sort(npresult)
except TypeError as err:
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index eca7a205983ef..01a04b7aa63d9 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -26,7 +26,6 @@
from pandas._libs import (
NaT,
algos as libalgos,
- hashtable as htable,
lib,
)
from pandas._libs.arrays import NDArrayBacked
@@ -2255,14 +2254,15 @@ def mode(self, dropna: bool = True) -> Categorical:
def _mode(self, dropna: bool = True) -> Categorical:
codes = self._codes
+ mask = None
if dropna:
- good = self._codes != -1
- codes = self._codes[good]
+ mask = self.isna()
- codes = htable.mode(codes, dropna)
- codes.sort()
- codes = coerce_indexer_dtype(codes, self.dtype.categories)
- return self._from_backing_data(codes)
+ res_codes = algorithms.mode(codes, mask=mask)
+ res_codes = cast(np.ndarray, res_codes)
+ assert res_codes.dtype == codes.dtype
+ res = self._from_backing_data(res_codes)
+ return res
# ------------------------------------------------------------------
# ExtensionArray Interface
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 95363e598a06c..5ae71b305ac60 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -57,6 +57,7 @@
)
from pandas.core import (
+ algorithms as algos,
arraylike,
missing,
nanops,
@@ -907,6 +908,15 @@ def value_counts(self, dropna: bool = True) -> Series:
)
from pandas.arrays import IntegerArray
+ if dropna:
+ keys, counts = algos.value_counts_arraylike(
+ self._data, dropna=True, mask=self._mask
+ )
+ res = Series(counts, index=keys)
+ res.index = res.index.astype(self.dtype)
+ res = res.astype("Int64")
+ return res
+
# compute counts on the data with no nans
data = self._data[~self._mask]
value_counts = Index(data).value_counts()
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46880 | 2022-04-26T23:32:51Z | 2022-04-27T12:32:25Z | 2022-04-27T12:32:25Z | 2022-04-27T14:56:08Z |
PERF: use C version of np.empty | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 5094f6f07d534..13bd95004445d 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -873,7 +873,7 @@ def get_level_sorter(
"""
cdef:
Py_ssize_t i, l, r
- ndarray[intp_t, ndim=1] out = np.empty(len(codes), dtype=np.intp)
+ ndarray[intp_t, ndim=1] out = cnp.PyArray_EMPTY(1, codes.shape, cnp.NPY_INTP, 0)
for i in range(len(starts) - 1):
l, r = starts[i], starts[i + 1]
@@ -2255,11 +2255,11 @@ def maybe_convert_numeric(
int status, maybe_int
Py_ssize_t i, n = values.size
Seen seen = Seen(coerce_numeric)
- ndarray[float64_t, ndim=1] floats = np.empty(n, dtype='f8')
- ndarray[complex128_t, ndim=1] complexes = np.empty(n, dtype='c16')
- ndarray[int64_t, ndim=1] ints = np.empty(n, dtype='i8')
- ndarray[uint64_t, ndim=1] uints = np.empty(n, dtype='u8')
- ndarray[uint8_t, ndim=1] bools = np.empty(n, dtype='u1')
+ ndarray[float64_t, ndim=1] floats = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_FLOAT64, 0)
+ ndarray[complex128_t, ndim=1] complexes = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_COMPLEX128, 0)
+ ndarray[int64_t, ndim=1] ints = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_INT64, 0)
+ ndarray[uint64_t, ndim=1] uints = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_UINT64, 0)
+ ndarray[uint8_t, ndim=1] bools = cnp.PyArray_EMPTY(1, values.shape, cnp.NPY_UINT8, 0)
ndarray[uint8_t, ndim=1] mask = np.zeros(n, dtype="u1")
float64_t fval
bint allow_null_in_int = convert_to_masked_nullable
@@ -2479,11 +2479,11 @@ def maybe_convert_objects(ndarray[object] objects,
n = len(objects)
- floats = np.empty(n, dtype='f8')
- complexes = np.empty(n, dtype='c16')
- ints = np.empty(n, dtype='i8')
- uints = np.empty(n, dtype='u8')
- bools = np.empty(n, dtype=np.uint8)
+ floats = cnp.PyArray_EMPTY(1, objects.shape, cnp.NPY_FLOAT64, 0)
+ complexes = cnp.PyArray_EMPTY(1, objects.shape, cnp.NPY_COMPLEX128, 0)
+ ints = cnp.PyArray_EMPTY(1, objects.shape, cnp.NPY_INT64, 0)
+ uints = cnp.PyArray_EMPTY(1, objects.shape, cnp.NPY_UINT64, 0)
+ bools = cnp.PyArray_EMPTY(1, objects.shape, cnp.NPY_UINT8, 0)
mask = np.full(n, False)
if convert_datetime:
@@ -2785,7 +2785,7 @@ cdef _infer_all_nats(dtype, ndarray datetimes, ndarray timedeltas):
else:
# ExtensionDtype
cls = dtype.construct_array_type()
- i8vals = np.empty(len(datetimes), dtype="i8")
+ i8vals = cnp.PyArray_EMPTY(1, datetimes.shape, cnp.NPY_INT64, 0)
i8vals.fill(NPY_NAT)
result = cls(i8vals, dtype=dtype)
return result
@@ -2888,7 +2888,7 @@ def map_infer(
object val
n = len(arr)
- result = np.empty(n, dtype=object)
+ result = cnp.PyArray_EMPTY(1, arr.shape, cnp.NPY_OBJECT, 0)
for i in range(n):
if ignore_na and checknull(arr[i]):
result[i] = arr[i]
@@ -3083,7 +3083,7 @@ cpdef ndarray eq_NA_compat(ndarray[object] arr, object key):
key is assumed to have `not isna(key)`
"""
cdef:
- ndarray[uint8_t, cast=True] result = np.empty(len(arr), dtype=bool)
+ ndarray[uint8_t, cast=True] result = cnp.PyArray_EMPTY(arr.ndim, arr.shape, cnp.NPY_BOOL, 0)
Py_ssize_t i
object item
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index c96a65cdff525..f4d495de26600 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -123,7 +123,7 @@ def format_array_from_datetime(
ndarray[int64_t] consider_values
bint show_ms = False, show_us = False, show_ns = False
bint basic_format = False
- ndarray[object] result = np.empty(N, dtype=object)
+ ndarray[object] result = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
object ts, res
npy_datetimestruct dts
@@ -349,7 +349,7 @@ def array_with_unit_to_datetime(
# and are in ignore mode
# redo as object
- oresult = np.empty(n, dtype=object)
+ oresult = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
for i in range(n):
val = values[i]
@@ -668,7 +668,7 @@ cdef ndarray[object] ignore_errors_out_of_bounds_fallback(ndarray[object] values
Py_ssize_t i, n = len(values)
object val
- oresult = np.empty(n, dtype=object)
+ oresult = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
for i in range(n):
val = values[i]
@@ -730,7 +730,7 @@ cdef _array_to_datetime_object(
assert is_raise or is_ignore or is_coerce
- oresult = np.empty(n, dtype=object)
+ oresult = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
# We return an object array and only attempt to parse:
# 1) NaT or NaT-like values
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 3d04562cb73c3..c2637db23293e 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1057,7 +1057,7 @@ def period_asfreq_arr(ndarray[int64_t] arr, int freq1, int freq2, bint end):
cdef:
Py_ssize_t n = len(arr)
Py_ssize_t increment = arr.strides[0] // 8
- ndarray[int64_t] result = np.empty(n, dtype=np.int64)
+ ndarray[int64_t] result = cnp.PyArray_EMPTY(arr.ndim, arr.shape, cnp.NPY_INT64, 0)
_period_asfreq(
<int64_t*>cnp.PyArray_DATA(arr),
@@ -1440,7 +1440,7 @@ def extract_ordinals(ndarray values, freq) -> np.ndarray:
cdef:
Py_ssize_t i, n = values.size
int64_t ordinal
- ndarray ordinals = np.empty((<object>values).shape, dtype=np.int64)
+ ndarray ordinals = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_INT64, 0)
cnp.broadcast mi = cnp.PyArray_MultiIterNew2(ordinals, values)
object p
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/46878 | 2022-04-26T18:16:07Z | 2022-04-27T12:35:40Z | 2022-04-27T12:35:40Z | 2022-04-27T14:55:56Z |
TST: Fix flaky xfail condition typo | diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 852e85968d43f..910449d98bcc5 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -840,7 +840,7 @@ def test_basic_series_frame_alignment(
and parser == "pandas"
and index_name == "index"
and r_idx_type == "i"
- and c_idx_type == "c"
+ and c_idx_type == "s"
):
reason = (
f"Flaky column ordering when engine={engine}, "
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
This flaky test resurfaced in https://github.com/pandas-dev/pandas/runs/6152914367?check_suite_focus=true but that was due to a typo in the if condition in https://github.com/pandas-dev/pandas/pull/46796
| https://api.github.com/repos/pandas-dev/pandas/pulls/46871 | 2022-04-25T19:57:55Z | 2022-04-26T00:22:29Z | 2022-04-26T00:22:29Z | 2022-04-26T01:15:58Z |
DOC: fix typo in docstrings | diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index dda0d9549e7b3..ab42fcd92a3d9 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -1146,7 +1146,7 @@ def assert_frame_equal(
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
- differences. Is is mostly intended for use in unit tests.
+ differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
| Correct a spelling mistake in docstrings of `pandas.testing.assert_frame_equal`.
Before
```python: before
Is is mostly intended for use in unit tests.
```
After
```
It is mostly intended for use in unit tests.
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/46866 | 2022-04-25T13:13:39Z | 2022-04-25T13:23:37Z | 2022-04-25T13:23:37Z | 2022-04-25T13:23:37Z |
CLN: removes cython implementation of groupby count | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cb237b93c70ba..e81aaebe77807 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4562,7 +4562,7 @@ def _count_level(self, level, axis=0, numeric_only=False):
level_index = count_axis.levels[level]
labels = com._ensure_int64(count_axis.labels[level])
- counts = lib.count_level_2d(mask, labels, len(level_index))
+ counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index,
columns=agg_axis)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 43110494d675b..1f5855e63dee8 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -69,7 +69,7 @@
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
- 'rank', 'quantile', 'count',
+ 'rank', 'quantile',
'fillna',
'mad',
'any', 'all',
@@ -149,9 +149,6 @@ def _last(x):
return _last(x)
-def _count_compat(x, axis=0):
- return x.count() # .size != .count(); count excludes nan
-
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
@@ -801,11 +798,6 @@ def size(self):
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
- _count = _groupby_function('_count', 'count', _count_compat,
- numeric_only=False)
-
- def count(self, axis=0):
- return self._count().astype('int64')
def ohlc(self):
"""
@@ -1463,7 +1455,6 @@ def get_group_levels(self):
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
- 'count': 'group_count',
}
_cython_arity = {
@@ -3468,6 +3459,24 @@ def _apply_to_column_groupbys(self, func):
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
+ def count(self):
+ from functools import partial
+ from pandas.lib import count_level_2d
+ from pandas.core.common import _isnull_ndarraylike as isnull
+
+ data, _ = self._get_data_to_aggregate()
+ ids, _, ngroups = self.grouper.group_info
+ mask = ids != -1
+
+ val = ((mask & ~isnull(blk.get_values())) for blk in data.blocks)
+ loc = (blk.mgr_locs for blk in data.blocks)
+
+ counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1)
+ blk = map(make_block, map(counter, val), loc)
+
+ return self._wrap_agged_blocks(data.items, list(blk))
+
+
from pandas.tools.plotting import boxplot_frame_groupby
DataFrameGroupBy.boxplot = boxplot_frame_groupby
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 7b2d849695c98..2b4974155d44c 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -1253,19 +1253,32 @@ def lookup_values(ndarray[object] values, dict mapping):
return maybe_convert_objects(result)
+@cython.boundscheck(False)
+@cython.wraparound(False)
def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask,
- ndarray[int64_t] labels, Py_ssize_t max_bin):
+ ndarray[int64_t, ndim=1] labels,
+ Py_ssize_t max_bin,
+ int axis):
cdef:
Py_ssize_t i, j, k, n
ndarray[int64_t, ndim=2] counts
+ assert(axis == 0 or axis == 1)
n, k = (<object> mask).shape
- counts = np.zeros((max_bin, k), dtype='i8')
- for i from 0 <= i < n:
- for j from 0 <= j < k:
- if mask[i, j]:
- counts[labels[i], j] += 1
+ if axis == 0:
+ counts = np.zeros((max_bin, k), dtype='i8')
+ with nogil:
+ for i from 0 <= i < n:
+ for j from 0 <= j < k:
+ counts[labels[i], j] += mask[i, j]
+
+ else: # axis == 1
+ counts = np.zeros((n, max_bin), dtype='i8')
+ with nogil:
+ for i from 0 <= i < n:
+ for j from 0 <= j < k:
+ counts[i, labels[j]] += mask[i, j]
return counts
diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py
index c086919d94644..b055d75df4cf4 100644
--- a/pandas/src/generate_code.py
+++ b/pandas/src/generate_code.py
@@ -971,44 +971,6 @@ def group_var_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
"""
-group_count_template = """@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_count_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[%(c_type)s, ndim=2] values,
- ndarray[int64_t] labels):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, lab, ncounts = len(counts)
- Py_ssize_t N = values.shape[0], K = values.shape[1]
- %(c_type)s val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(values) != len(labels):
- raise AssertionError("len(index) != len(labels)")
-
-
- %(nogil)s
- %(tab)sfor i in range(N):
- %(tab)s lab = labels[i]
- %(tab)s if lab < 0:
- %(tab)s continue
-
- %(tab)s counts[lab] += 1
- %(tab)s for j in range(K):
- %(tab)s val = values[i, j]
-
- %(tab)s # not nan
- %(tab)s nobs[lab, j] += val == val and val != iNaT
-
- %(tab)sfor i in range(ncounts):
- %(tab)s for j in range(K):
- %(tab)s out[i, j] = nobs[i, j]
-"""
-
# add passing bin edges, instead of labels
@@ -1995,8 +1957,6 @@ def generate_from_template(template, exclude=None):
groupby_min_max = [group_min_template,
group_max_template]
-groupby_count = [group_count_template]
-
templates_1d = [map_indices_template,
pad_template,
backfill_template,
@@ -2051,12 +2011,6 @@ def generate_take_cython_file():
print(generate_put_min_max_template(template, use_ints=True),
file=f)
- for template in groupby_count:
- print(generate_put_selection_template(template, use_ints=True,
- use_datelikes=True,
- use_objects=True),
- file=f)
-
for template in nobool_1d_templates:
print(generate_from_template(template, exclude=['bool']), file=f)
diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx
index c0ecd04749e58..2f2fd528999d6 100644
--- a/pandas/src/generated.pyx
+++ b/pandas/src/generated.pyx
@@ -7930,192 +7930,6 @@ def group_max_int64(ndarray[int64_t, ndim=2] out,
out[i, j] = maxx[i, j]
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_count_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, lab, ncounts = len(counts)
- Py_ssize_t N = values.shape[0], K = values.shape[1]
- float64_t val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(values) != len(labels):
- raise AssertionError("len(index) != len(labels)")
-
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- nobs[lab, j] += val == val and val != iNaT
-
- for i in range(ncounts):
- for j in range(K):
- out[i, j] = nobs[i, j]
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_count_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] labels):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, lab, ncounts = len(counts)
- Py_ssize_t N = values.shape[0], K = values.shape[1]
- float32_t val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(values) != len(labels):
- raise AssertionError("len(index) != len(labels)")
-
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- nobs[lab, j] += val == val and val != iNaT
-
- for i in range(ncounts):
- for j in range(K):
- out[i, j] = nobs[i, j]
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_count_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] labels):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, lab, ncounts = len(counts)
- Py_ssize_t N = values.shape[0], K = values.shape[1]
- int64_t val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(values) != len(labels):
- raise AssertionError("len(index) != len(labels)")
-
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- nobs[lab, j] += val == val and val != iNaT
-
- for i in range(ncounts):
- for j in range(K):
- out[i, j] = nobs[i, j]
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_count_object(ndarray[object, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[object, ndim=2] values,
- ndarray[int64_t] labels):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, lab, ncounts = len(counts)
- Py_ssize_t N = values.shape[0], K = values.shape[1]
- object val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(values) != len(labels):
- raise AssertionError("len(index) != len(labels)")
-
-
-
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- nobs[lab, j] += val == val and val != iNaT
-
- for i in range(ncounts):
- for j in range(K):
- out[i, j] = nobs[i, j]
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_count_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] labels):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, lab, ncounts = len(counts)
- Py_ssize_t N = values.shape[0], K = values.shape[1]
- int64_t val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(values) != len(labels):
- raise AssertionError("len(index) != len(labels)")
-
-
- with nogil:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- nobs[lab, j] += val == val and val != iNaT
-
- for i in range(ncounts):
- for j in range(K):
- out[i, j] = nobs[i, j]
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def left_join_indexer_unique_float64(ndarray[float64_t] left,
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index f5693983f1cc1..a85e68602493b 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -2481,6 +2481,30 @@ def test_size(self):
self.assertEqual(result[key], len(group))
def test_count(self):
+ from string import ascii_lowercase
+ n = 1 << 15
+ dr = date_range('2015-08-30', periods=n // 10, freq='T')
+
+ df = DataFrame({
+ '1st':np.random.choice(list(ascii_lowercase), n),
+ '2nd':np.random.randint(0, 5, n),
+ '3rd':np.random.randn(n).round(3),
+ '4th':np.random.randint(-10, 10, n),
+ '5th':np.random.choice(dr, n),
+ '6th':np.random.randn(n).round(3),
+ '7th':np.random.randn(n).round(3),
+ '8th':np.random.choice(dr, n) - np.random.choice(dr, 1),
+ '9th':np.random.choice(list(ascii_lowercase), n)})
+
+ for col in df.columns.drop(['1st', '2nd', '4th']):
+ df.loc[np.random.choice(n, n // 10), col] = np.nan
+
+ df['9th'] = df['9th'].astype('category')
+
+ for key in '1st', '2nd', ['1st', '2nd']:
+ left = df.groupby(key).count()
+ right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
+ assert_frame_equal(left, right)
# GH5610
# count counts non-nulls
@@ -4966,7 +4990,7 @@ def test_groupby_whitelist(self):
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
- 'rank', 'quantile', 'count',
+ 'rank', 'quantile',
'fillna',
'mad',
'any', 'all',
@@ -4987,7 +5011,7 @@ def test_groupby_whitelist(self):
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
- 'rank', 'quantile', 'count',
+ 'rank', 'quantile',
'fillna',
'mad',
'any', 'all',
@@ -5253,7 +5277,6 @@ def test__cython_agg_general(self):
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
- ('count', np.size),
]
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
@@ -5439,26 +5462,26 @@ def test_first_last_max_min_on_time_data(self):
def test_groupby_preserves_sort(self):
# Test to ensure that groupby always preserves sort order of original
# object. Issue #8588 and #9651
-
- df = DataFrame({'int_groups':[3,1,0,1,0,3,3,3],
- 'string_groups':['z','a','z','a','a','g','g','g'],
+
+ df = DataFrame({'int_groups':[3,1,0,1,0,3,3,3],
+ 'string_groups':['z','a','z','a','a','g','g','g'],
'ints':[8,7,4,5,2,9,1,1],
'floats':[2.3,5.3,6.2,-2.4,2.2,1.1,1.1,5],
'strings':['z','d','a','e','word','word2','42','47']})
# Try sorting on different types and with different group types
- for sort_column in ['ints', 'floats', 'strings', ['ints','floats'],
+ for sort_column in ['ints', 'floats', 'strings', ['ints','floats'],
['ints','strings']]:
- for group_column in ['int_groups', 'string_groups',
+ for group_column in ['int_groups', 'string_groups',
['int_groups','string_groups']]:
df = df.sort_values(by=sort_column)
g = df.groupby(group_column)
-
+
def test_sort(x):
assert_frame_equal(x, x.sort_values(by=sort_column))
-
+
g.apply(test_sort)
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 0f55f79b8b9b9..df61387734cb3 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -829,7 +829,7 @@ def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
- expected = frame.groupby(axis=axis, level=i).count(axis=axis)
+ expected = frame.groupby(axis=axis, level=i).count()
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
| https://api.github.com/repos/pandas-dev/pandas/pulls/11013 | 2015-09-05T22:48:36Z | 2015-09-07T20:30:05Z | 2015-09-07T20:30:05Z | 2015-09-08T01:37:28Z | |
DOC: Docstring Redesign to fix the problem of unexpected keyword arg (issue10888) | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 38c5593e5911a..edf03db309120 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -1033,7 +1033,7 @@ Bug Fixes
- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
- Bug in ``Index.take`` may add unnecessary ``freq`` attribute (:issue:`10791`)
- Bug in ``merge`` with empty ``DataFrame`` may raise ``IndexError`` (:issue:`10824`)
-
+- Bug in ``to_latex`` where unexpected keyword argument for some documented arguments (:issue:`10888`)
- Bug in ``read_csv`` when using the ``nrows`` or ``chunksize`` parameters if file contains only a header line (:issue:`9535`)
- Bug in serialization of ``category`` types in HDF5 in presence of alternate encodings. (:issue:`10366`)
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 47d0ef37383c4..f0608cbb654f8 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -24,11 +24,9 @@
import itertools
import csv
-docstring_to_string = """
- Parameters
- ----------
- frame : DataFrame
- object to render
+common_docstring = """
+ Parameters
+ ----------
buf : StringIO-like, optional
buffer to write to
columns : sequence, optional
@@ -51,20 +49,27 @@
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every
multiindex key at each row, default True
+ index_names : bool, optional
+ Prints the names of the indexes, default True"""
+
+justify_docstring = """
justify : {'left', 'right'}, default None
Left or right-justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
- of the box.
- index_names : bool, optional
- Prints the names of the indexes, default True
+ of the box."""
+
+force_unicode_docstring = """
force_unicode : bool, default False
Always return a unicode result. Deprecated in v0.10.0 as string
- formatting is now rendered to unicode by default.
+ formatting is now rendered to unicode by default."""
+
+return_docstring = """
Returns
-------
formatted : string (or unicode, depending on data and options)"""
+docstring_to_string = common_docstring + justify_docstring + force_unicode_docstring + return_docstring
class CategoricalFormatter(object):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b4bb06fe83649..5ab75f7d2658a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1413,7 +1413,7 @@ def to_stata(
write_index=write_index)
writer.write_file()
- @Appender(fmt.docstring_to_string, indents=1)
+ @Appender(fmt.common_docstring + fmt.justify_docstring + fmt.return_docstring, indents=1)
def to_string(self, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
@@ -1441,7 +1441,7 @@ def to_string(self, buf=None, columns=None, col_space=None,
result = formatter.buf.getvalue()
return result
- @Appender(fmt.docstring_to_string, indents=1)
+ @Appender(fmt.common_docstring + fmt.justify_docstring + fmt.return_docstring, indents=1)
def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
header=True, index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
@@ -1491,7 +1491,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
if buf is None:
return formatter.buf.getvalue()
- @Appender(fmt.docstring_to_string, indents=1)
+ @Appender(fmt.common_docstring + fmt.return_docstring, indents=1)
def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,
header=True, index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
| Fixed issue #10888
| https://api.github.com/repos/pandas-dev/pandas/pulls/11011 | 2015-09-05T21:24:50Z | 2015-09-09T14:55:53Z | 2015-09-09T14:55:53Z | 2015-09-09T15:04:00Z |
BUG: Bug in pickling of a non-regular freq DatetimeIndex #11002 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 68d3861599cbd..1a31f38b585cf 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -894,7 +894,7 @@ Bug Fixes
- Bug in clearing the cache on ``DataFrame.pop`` and a subsequent inplace op (:issue:`10912`)
- Bug in indexing with a mixed-integer ``Index`` causing an ``ImportError`` (:issue:`10610`)
- Bug in ``Series.count`` when index has nulls (:issue:`10946`)
-
+- Bug in pickling of a non-regular freq ``DatetimeIndex`` (:issue:`11002`)
- Bug causing ``DataFrame.where`` to not respect the ``axis`` parameter when the frame has a symmetric shape. (:issue:`9736`)
- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`)
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index b1198f9758938..4ba15d319dc62 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -120,7 +120,8 @@ def _new_DatetimeIndex(cls, d):
# data are already in UTC
# so need to localize
tz = d.pop('tz',None)
- result = cls.__new__(cls, **d)
+
+ result = cls.__new__(cls, verify_integrity=False, **d)
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 84a4c3e08e493..a021195ea6c04 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -2142,8 +2142,8 @@ def test_period_resample_with_local_timezone_dateutil(self):
def test_pickle(self):
- #GH4606
+ # GH4606
p = self.round_trip_pickle(NaT)
self.assertTrue(p is NaT)
@@ -2153,6 +2153,11 @@ def test_pickle(self):
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
+ # GH11002
+ # don't infer freq
+ idx = date_range('1750-1-1', '2050-1-1', freq='7D')
+ idx_p = self.round_trip_pickle(idx)
+ tm.assert_index_equal(idx, idx_p)
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
| closes #11002
| https://api.github.com/repos/pandas-dev/pandas/pulls/11006 | 2015-09-05T18:27:44Z | 2015-09-05T23:22:57Z | 2015-09-05T23:22:57Z | 2015-09-05T23:22:57Z |
DOC: consistent doc-string with function declaration, added missing param doc-string for sql.py | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index c0b69e435f494..2ed0126505c41 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -143,8 +143,8 @@ def execute(sql, con, cur=None, params=None):
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
- cur : deprecated, cursor is obtained from connection
- params : list or tuple, optional
+ cur : deprecated, cursor is obtained from connection, default: None
+ params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
@@ -189,8 +189,9 @@ def tquery(sql, con=None, cur=None, retry=True):
----------
sql: string
SQL query to be executed
- con: DBAPI2 connection
- cur: deprecated, cursor is obtained from connection
+ con: DBAPI2 connection, default: None
+ cur: deprecated, cursor is obtained from connection, default: None
+ retry: boolean value to specify whether to retry after failure, default: True
Returns
-------
@@ -242,9 +243,10 @@ def uquery(sql, con=None, cur=None, retry=True, params=None):
----------
sql: string
SQL query to be executed
- con: DBAPI2 connection
- cur: deprecated, cursor is obtained from connection
- params: list or tuple, optional
+ con: DBAPI2 connection, default: None
+ cur: deprecated, cursor is obtained from connection, default: None
+ retry: boolean value to specify whether to retry after failure, default: True
+ params: list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
@@ -294,12 +296,12 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If None, use default schema (default).
- index_col : string, optional
+ index_col : string, optional, default: None
Column to set as index
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of Precision.
- parse_dates : list or dict
+ parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
@@ -308,7 +310,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
- columns : list
+ columns : list, default: None
List of column names to select from sql table
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
@@ -369,18 +371,18 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
- index_col : string, optional
+ index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
- params : list, tuple or dict, optional
+ params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
- parse_dates : list or dict
+ parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
@@ -428,18 +430,18 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
- index_col : string, optional
+ index_col : string, optional, default: None
column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
- params : list, tuple or dict, optional
+ params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
- parse_dates : list or dict
+ parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
@@ -448,7 +450,7 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
- columns : list
+ columns : list, default: None
List of column names to select from sql table (only used when reading
a table).
chunksize : int, default None
@@ -1061,13 +1063,13 @@ def read_table(self, table_name, index_col=None, coerce_float=True,
----------
table_name : string
Name of SQL table in database
- index_col : string, optional
+ index_col : string, optional, default: None
Column to set as index
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
- parse_dates : list or dict
+ parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
@@ -1076,7 +1078,7 @@ def read_table(self, table_name, index_col=None, coerce_float=True,
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite
- columns : list
+ columns : list, default: None
List of column names to select from sql table
schema : string, default None
Name of SQL schema in database to query (if database flavor
@@ -1123,18 +1125,18 @@ def read_query(self, sql, index_col=None, coerce_float=True,
----------
sql : string
SQL query to be executed
- index_col : string, optional
+ index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
- params : list, tuple or dict, optional
+ params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
- parse_dates : list or dict
+ parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
@@ -1143,6 +1145,9 @@ def read_query(self, sql, index_col=None, coerce_float=True,
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
+ chunksize : int, default None
+ If specified, return an iterator where `chunksize` is the number
+ of rows to include in each chunk.
Returns
-------
@@ -1650,11 +1655,11 @@ def get_schema(frame, name, flavor='sqlite', keys=None, con=None, dtype=None):
The flavor of SQL to use. Ignored when using SQLAlchemy connectable.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
- keys : string or sequence
+ keys : string or sequence, default: None
columns to use a primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
- library.
+ library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
| Added default values for params in doc-string and added missing param doc-string that are needed for new users.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11004 | 2015-09-05T18:15:57Z | 2015-09-05T18:38:17Z | 2015-09-05T18:38:17Z | 2015-09-05T18:39:41Z |
ENH: add Series.astype with the new tz dtype | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index dd13e8fabf0e9..7e96fdad29193 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1753,22 +1753,56 @@ TZ aware Dtypes
.. versionadded:: 0.17.0
-``Series/DatetimeIndex`` with a timezone naive value are represented with a dtype of ``datetime64[ns]``.
+``Series/DatetimeIndex`` with a timezone **naive** value are represented with a dtype of ``datetime64[ns]``.
.. ipython:: python
- dr = pd.date_range('20130101',periods=3)
- dr
- s = Series(dr)
- s
+ dr_naive = pd.date_range('20130101',periods=3)
+ dr_naive
+ s_naive = Series(dr_naive)
+ s_naive
-``Series/DatetimeIndex`` with a timezone aware value are represented with a dtype of ``datetime64[ns, tz]``.
+``Series/DatetimeIndex`` with a timezone **aware** value are represented with a dtype of ``datetime64[ns, tz]``.
.. ipython:: python
- dr = pd.date_range('20130101',periods=3,tz='US/Eastern')
- dr
- s = Series(dr)
- s
+ dr_aware = pd.date_range('20130101',periods=3,tz='US/Eastern')
+ dr_aware
+ s_aware = Series(dr_aware)
+ s_aware
+
+Both of these ``Series`` can be manipulated via the ``.dt`` accessor, see :ref:`here <basics.dt_accessors>`.
+See the :ref:`docs <timeseries.dtypes>` for more details.
+
+Further more you can ``.astype(...)`` timezone aware (and naive).
+
+.. ipython:: python
+
+ # make this naive
+ s_aware.astype('datetime64[ns]')
+
+ # convert
+ s_aware.astype('datetime64[ns, CET]')
+ s_naive.astype('datetime64[ns, CET]')
+
+.. note::
+
+ Using the ``.values`` accessor on a ``Series``, returns an numpy array of the data.
+ These values are converted to UTC, as numpy does not currently support timezones (even though it is *printing* in the local timezone!).
+
+ .. ipython:: python
+
+ s_naive.values
+ s_aware.values
+
+ Further note that once converted to a numpy array these would lose the tz tenor.
+
+ .. ipython:: python
+
+ Series(s_aware.values)
+
+ However, these can be easily converted
+
+ .. ipython:: python
-Both of these ``Series`` can be manipulated via the ``.dt`` accessor, see the :ref:`docs <basics.dt_accessors>` as well.
+ Series(s_aware).dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index f88e5c0a11f9f..9eb005a604b0c 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -447,9 +447,9 @@ Datetime with TZ
We are adding an implementation that natively supports datetime with timezones. A ``Series`` or a ``DataFrame`` column previously
*could* be assigned a datetime with timezones, and would work as an ``object`` dtype. This had performance issues with a large
-number rows. (:issue:`8260`, :issue:`10763`)
+number rows. See the :ref:`docs <timeseries.timezone_series>` for more details. (:issue:`8260`, :issue:`10763`).
-The new implementation allows for having a single-timezone across all rows, and operating on it in a performant manner.
+The new implementation allows for having a single-timezone across all rows, with operations in a performant manner.
.. ipython:: python
@@ -469,13 +469,15 @@ This uses a new-dtype representation as well, that is very similar in look-and-f
.. ipython:: python
df['B'].dtype
- type(df['B']).dtype
+ type(df['B'].dtype)
.. note::
There is a slightly different string repr for the underlying ``DatetimeIndex`` as a result of the dtype changes, but
functionally these are the same.
+ Previous Behavior:
+
.. code-block:: python
In [1]: pd.date_range('20130101',periods=3,tz='US/Eastern')
@@ -486,12 +488,13 @@ This uses a new-dtype representation as well, that is very similar in look-and-f
In [2]: pd.date_range('20130101',periods=3,tz='US/Eastern').dtype
Out[2]: dtype('<M8[ns]')
+ New Behavior:
+
.. ipython:: python
pd.date_range('20130101',periods=3,tz='US/Eastern')
pd.date_range('20130101',periods=3,tz='US/Eastern').dtype
-
.. _whatsnew_0170.api_breaking.convert_objects:
Changes to convert_objects
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 58ee36142d4fd..94eccad8e0185 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -18,6 +18,7 @@
array_equivalent, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric,
is_internal_type)
+from pandas.core.dtypes import DatetimeTZDtype
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
@@ -1868,6 +1869,26 @@ def __init__(self, values, placement,
fastpath=True, placement=placement,
**kwargs)
+ def _astype(self, dtype, **kwargs):
+ """
+ these automatically copy, so copy=True has no effect
+ raise on an except if raise == True
+ """
+
+ # if we are passed a datetime64[ns, tz]
+ if com.is_datetime64tz_dtype(dtype):
+ dtype = DatetimeTZDtype(dtype)
+
+ values = self.values
+ if getattr(values,'tz',None) is None:
+ values = DatetimeIndex(values).tz_localize('UTC')
+ values = values.tz_convert(dtype.tz)
+ return self.make_block(values)
+
+ # delegate
+ return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs)
+
+
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 8da821a1fbb9a..0794ae5003983 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -1072,6 +1072,20 @@ def test_constructor_with_datetime_tz(self):
expected = Series(DatetimeIndex(s._values).asobject)
assert_series_equal(result, expected)
+ result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz)
+ assert_series_equal(result, s)
+
+ # astype - datetime64[ns, tz]
+ result = Series(s.values).astype('datetime64[ns, US/Eastern]')
+ assert_series_equal(result, s)
+
+ result = Series(s.values).astype(s.dtype)
+ assert_series_equal(result, s)
+
+ result = s.astype('datetime64[ns, CET]')
+ expected = Series(date_range('20130101 06:00:00',periods=3,tz='CET'))
+ assert_series_equal(result, expected)
+
# short str
self.assertTrue('datetime64[ns, US/Eastern]' in str(s))
| add ability to astype using the new dtype
| https://api.github.com/repos/pandas-dev/pandas/pulls/11003 | 2015-09-05T18:15:55Z | 2015-09-07T20:20:50Z | 2015-09-07T20:20:50Z | 2015-09-07T20:20:50Z |
DOC: fix NaNs in categories | diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index 534ab0e343398..ddd4fb81ed1f1 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -149,8 +149,8 @@ Using ``.describe()`` on categorical data will produce similar output to a `Seri
.. ipython:: python
- cat = pd.Categorical(["a","c","c",np.nan], categories=["b","a","c",np.nan] )
- df = pd.DataFrame({"cat":cat, "s":["a","c","c",np.nan]})
+ cat = pd.Categorical(["a", "c", "c", np.nan], categories=["b", "a", "c"])
+ df = pd.DataFrame({"cat":cat, "s":["a", "c", "c", np.nan]})
df.describe()
df["cat"].describe()
@@ -642,10 +642,10 @@ a code of ``-1``.
.. ipython:: python
- s = pd.Series(["a","b",np.nan,"a"], dtype="category")
+ s = pd.Series(["a", "b", np.nan, "a"], dtype="category")
# only two categories
s
- s.codes
+ s.cat.codes
Methods for working with missing data, e.g. :meth:`~Series.isnull`, :meth:`~Series.fillna`,
@@ -653,8 +653,7 @@ Methods for working with missing data, e.g. :meth:`~Series.isnull`, :meth:`~Seri
.. ipython:: python
- c = pd.Series(["a","b",np.nan], dtype="category")
- s = pd.Series(c)
+ s = pd.Series(["a", "b", np.nan], dtype="category")
s
pd.isnull(s)
s.fillna("a")
diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst
index 1992288fd4d00..d1a2ba59d7fdf 100644
--- a/doc/source/remote_data.rst
+++ b/doc/source/remote_data.rst
@@ -62,6 +62,7 @@ Yahoo! Finance
--------------
.. ipython:: python
+ :okwarning:
import pandas.io.data as web
import datetime
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 29b955a55fcc9..9795c082ddb98 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1278,17 +1278,12 @@ frequency. Arithmetic is not allowed between ``Period`` with different ``freq``
.. ipython:: python
p = Period('2012', freq='A-DEC')
-
p + 1
-
p - 3
-
p = Period('2012-01', freq='2M')
-
p + 2
-
p - 1
-
+ @okexcept
p == Period('2012-01', freq='3M')
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index 4378d182b3128..2eaf143a3e0b8 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -387,6 +387,7 @@ The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used.
np.random.seed(123456)
.. ipython:: python
+ :okwarning:
df = pd.DataFrame(np.random.rand(10,5))
plt.figure();
| Fix warning + error in the categorical docs
| https://api.github.com/repos/pandas-dev/pandas/pulls/10999 | 2015-09-05T11:43:32Z | 2015-09-05T14:17:28Z | 2015-09-05T14:17:28Z | 2015-09-05T14:17:28Z |
asv bench cleanup - groupby | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index f1ac09b8b2516..138977a29463e 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -3,7 +3,7 @@
from itertools import product
-class groupby_agg_builtins1(object):
+class groupby_agg_builtins(object):
goal_time = 0.2
def setup(self):
@@ -14,18 +14,11 @@ def setup(self):
def time_groupby_agg_builtins1(self):
self.df.groupby('jim').agg([sum, min, max])
-
-class groupby_agg_builtins2(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(27182)
- self.n = 100000
- self.df = DataFrame(np.random.randint(1, (self.n / 100), (self.n, 3)), columns=['jim', 'joe', 'jolie'])
-
def time_groupby_agg_builtins2(self):
self.df.groupby(['jim', 'joe']).agg([sum, min, max])
+#----------------------------------------------------------------------
+# dict return values
class groupby_apply_dict_return(object):
goal_time = 0.2
@@ -39,33 +32,49 @@ def time_groupby_apply_dict_return(self):
self.data.groupby(self.labels).apply(self.f)
-class groupby_dt_size(object):
+#----------------------------------------------------------------------
+# First / last functions
+
+class groupby_first_last(object):
goal_time = 0.2
def setup(self):
- self.n = 100000
- self.offsets = np.random.randint(self.n, size=self.n).astype('timedelta64[ns]')
- self.dates = (np.datetime64('now') + self.offsets)
- self.df = DataFrame({'key1': np.random.randint(0, 500, size=self.n), 'key2': np.random.randint(0, 100, size=self.n), 'value1': np.random.randn(self.n), 'value2': np.random.randn(self.n), 'value3': np.random.randn(self.n), 'dates': self.dates, })
+ self.labels = np.arange(10000).repeat(10)
+ self.data = Series(randn(len(self.labels)))
+ self.data[::3] = np.nan
+ self.data[1::3] = np.nan
+ self.data2 = Series(randn(len(self.labels)), dtype='float32')
+ self.data2[::3] = np.nan
+ self.data2[1::3] = np.nan
+ self.labels = self.labels.take(np.random.permutation(len(self.labels)))
- def time_groupby_dt_size(self):
- self.df.groupby(['dates']).size()
+ def time_groupby_first_float32(self):
+ self.data2.groupby(self.labels).first()
+ def time_groupby_first_float64(self):
+ self.data.groupby(self.labels).first()
-class groupby_dt_timegrouper_size(object):
- goal_time = 0.2
+ def time_groupby_last_float32(self):
+ self.data2.groupby(self.labels).last()
- def setup(self):
- self.n = 100000
- self.offsets = np.random.randint(self.n, size=self.n).astype('timedelta64[ns]')
- self.dates = (np.datetime64('now') + self.offsets)
- self.df = DataFrame({'key1': np.random.randint(0, 500, size=self.n), 'key2': np.random.randint(0, 100, size=self.n), 'value1': np.random.randn(self.n), 'value2': np.random.randn(self.n), 'value3': np.random.randn(self.n), 'dates': self.dates, })
+ def time_groupby_last_float64(self):
+ self.data.groupby(self.labels).last()
- def time_groupby_dt_timegrouper_size(self):
- self.df.groupby(TimeGrouper(key='dates', freq='M')).size()
+ def time_groupby_nth_float32_any(self):
+ self.data2.groupby(self.labels).nth(0, dropna='all')
+
+ def time_groupby_nth_float32_none(self):
+ self.data2.groupby(self.labels).nth(0)
+
+ def time_groupby_nth_float64_any(self):
+ self.data.groupby(self.labels).nth(0, dropna='all')
+
+ def time_groupby_nth_float64_none(self):
+ self.data.groupby(self.labels).nth(0)
+# with datetimes (GH7555)
-class groupby_first_datetimes(object):
+class groupby_first_last_datetimes(object):
goal_time = 0.2
def setup(self):
@@ -74,50 +83,37 @@ def setup(self):
def time_groupby_first_datetimes(self):
self.df.groupby('b').first()
+ def time_groupby_last_datetimes(self):
+ self.df.groupby('b').last()
-class groupby_first_float32(object):
- goal_time = 0.2
-
- def setup(self):
- self.labels = np.arange(10000).repeat(10)
- self.data = Series(randn(len(self.labels)))
- self.data[::3] = np.nan
- self.data[1::3] = np.nan
- self.data2 = Series(randn(len(self.labels)), dtype='float32')
- self.data2[::3] = np.nan
- self.data2[1::3] = np.nan
- self.labels = self.labels.take(np.random.permutation(len(self.labels)))
+ def time_groupby_nth_datetimes_any(self):
+ self.df.groupby('b').nth(0, dropna='all')
- def time_groupby_first_float32(self):
- self.data2.groupby(self.labels).first()
+ def time_groupby_nth_datetimes_none(self):
+ self.df.groupby('b').nth(0)
-class groupby_first_float64(object):
+class groupby_first_last_object(object):
goal_time = 0.2
def setup(self):
- self.labels = np.arange(10000).repeat(10)
- self.data = Series(randn(len(self.labels)))
- self.data[::3] = np.nan
- self.data[1::3] = np.nan
- self.data2 = Series(randn(len(self.labels)), dtype='float32')
- self.data2[::3] = np.nan
- self.data2[1::3] = np.nan
- self.labels = self.labels.take(np.random.permutation(len(self.labels)))
+ self.df = DataFrame({'a': (['foo'] * 100000), 'b': range(100000)})
- def time_groupby_first_float64(self):
- self.data.groupby(self.labels).first()
+ def time_groupby_first_object(self):
+ self.df.groupby('b').first()
+ def time_groupby_last_object(self):
+ self.df.groupby('b').last()
-class groupby_first_object(object):
- goal_time = 0.2
+ def time_groupby_nth_object_any(self):
+ self.df.groupby('b').nth(0, dropna='any')
- def setup(self):
- self.df = DataFrame({'a': (['foo'] * 100000), 'b': range(100000), })
+ def time_groupby_nth_object_none(self):
+ self.df.groupby('b').nth(0)
- def time_groupby_first_object(self):
- self.df.groupby('b').first()
+#----------------------------------------------------------------------
+# DataFrame Apply overhead
class groupby_frame_apply(object):
goal_time = 0.2
@@ -128,28 +124,18 @@ def setup(self):
self.labels2 = np.random.randint(0, 3, size=self.N)
self.df = DataFrame({'key': self.labels, 'key2': self.labels2, 'value1': randn(self.N), 'value2': (['foo', 'bar', 'baz', 'qux'] * (self.N / 4)), })
- def time_groupby_frame_apply(self):
- self.df.groupby(['key', 'key2']).apply(self.f)
-
def f(self, g):
return 1
-
-class groupby_frame_apply_overhead(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 10000
- self.labels = np.random.randint(0, 2000, size=self.N)
- self.labels2 = np.random.randint(0, 3, size=self.N)
- self.df = DataFrame({'key': self.labels, 'key2': self.labels2, 'value1': randn(self.N), 'value2': (['foo', 'bar', 'baz', 'qux'] * (self.N / 4)), })
+ def time_groupby_frame_apply(self):
+ self.df.groupby(['key', 'key2']).apply(self.f)
def time_groupby_frame_apply_overhead(self):
self.df.groupby('key').apply(self.f)
- def f(self, g):
- return 1
+#----------------------------------------------------------------------
+# 2d grouping, aggregate many columns
class groupby_frame_cython_many_columns(object):
goal_time = 0.2
@@ -158,53 +144,67 @@ def setup(self):
self.labels = np.random.randint(0, 100, size=1000)
self.df = DataFrame(randn(1000, 1000))
- def time_groupby_frame_cython_many_columns(self):
+ def time_sum(self):
self.df.groupby(self.labels).sum()
-class groupby_frame_median(object):
+#----------------------------------------------------------------------
+# single key, long, integer key
+
+class groupby_frame_singlekey_integer(object):
goal_time = 0.2
def setup(self):
- self.data = np.random.randn(100000, 2)
+ self.data = np.random.randn(100000, 1)
self.labels = np.random.randint(0, 1000, size=100000)
self.df = DataFrame(self.data)
- def time_groupby_frame_median(self):
- self.df.groupby(self.labels).median()
+ def time_sum(self):
+ self.df.groupby(self.labels).sum()
+
+#----------------------------------------------------------------------
+# median
-class groupby_frame_nth_any(object):
+class groupby_frame(object):
goal_time = 0.2
def setup(self):
- self.df = DataFrame(np.random.randint(1, 100, (10000, 2)))
+ self.data = np.random.randn(100000, 2)
+ self.labels = np.random.randint(0, 1000, size=100000)
+ self.df = DataFrame(self.data)
+
+ def time_groupby_frame_median(self):
+ self.df.groupby(self.labels).median()
+
+ def time_groupby_simple_compress_timing(self):
+ self.df.groupby(self.labels).mean()
- def time_groupby_frame_nth_any(self):
- self.df.groupby(0).nth(0, dropna='any')
+#----------------------------------------------------------------------
+# DataFrame nth
-class groupby_frame_nth_none(object):
+class groupby_nth(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randint(1, 100, (10000, 2)))
+ def time_groupby_frame_nth_any(self):
+ self.df.groupby(0).nth(0, dropna='any')
+
def time_groupby_frame_nth_none(self):
self.df.groupby(0).nth(0)
+ def time_groupby_series_nth_any(self):
+ self.df[1].groupby(self.df[0]).nth(0, dropna='any')
-class groupby_frame_singlekey_integer(object):
- goal_time = 0.2
-
- def setup(self):
- self.data = np.random.randn(100000, 1)
- self.labels = np.random.randint(0, 1000, size=100000)
- self.df = DataFrame(self.data)
+ def time_groupby_series_nth_none(self):
+ self.df[1].groupby(self.df[0]).nth(0)
- def time_groupby_frame_singlekey_integer(self):
- self.df.groupby(self.labels).sum()
+#----------------------------------------------------------------------
+# groupby_indices replacement, chop up Series
class groupby_indices(object):
goal_time = 0.2
@@ -240,70 +240,8 @@ def time_groupby_int64_overflow(self):
self.df.groupby(list('abcde')).max()
-class groupby_int_count(object):
- goal_time = 0.2
-
- def setup(self):
- self.n = 10000
- self.df = DataFrame({'key1': randint(0, 500, size=self.n), 'key2': randint(0, 100, size=self.n), 'ints': randint(0, 1000, size=self.n), 'ints2': randint(0, 1000, size=self.n), })
-
- def time_groupby_int_count(self):
- self.df.groupby(['key1', 'key2']).count()
-
-
-class groupby_last_datetimes(object):
- goal_time = 0.2
-
- def setup(self):
- self.df = DataFrame({'a': date_range('1/1/2011', periods=100000, freq='s'), 'b': range(100000), })
-
- def time_groupby_last_datetimes(self):
- self.df.groupby('b').last()
-
-
-class groupby_last_float32(object):
- goal_time = 0.2
-
- def setup(self):
- self.labels = np.arange(10000).repeat(10)
- self.data = Series(randn(len(self.labels)))
- self.data[::3] = np.nan
- self.data[1::3] = np.nan
- self.data2 = Series(randn(len(self.labels)), dtype='float32')
- self.data2[::3] = np.nan
- self.data2[1::3] = np.nan
- self.labels = self.labels.take(np.random.permutation(len(self.labels)))
-
- def time_groupby_last_float32(self):
- self.data2.groupby(self.labels).last()
-
-
-class groupby_last_float64(object):
- goal_time = 0.2
-
- def setup(self):
- self.labels = np.arange(10000).repeat(10)
- self.data = Series(randn(len(self.labels)))
- self.data[::3] = np.nan
- self.data[1::3] = np.nan
- self.data2 = Series(randn(len(self.labels)), dtype='float32')
- self.data2[::3] = np.nan
- self.data2[1::3] = np.nan
- self.labels = self.labels.take(np.random.permutation(len(self.labels)))
-
- def time_groupby_last_float64(self):
- self.data.groupby(self.labels).last()
-
-
-class groupby_last_object(object):
- goal_time = 0.2
-
- def setup(self):
- self.df = DataFrame({'a': (['foo'] * 100000), 'b': range(100000), })
-
- def time_groupby_last_object(self):
- self.df.groupby('b').last()
-
+#----------------------------------------------------------------------
+# count() speed
class groupby_multi_count(object):
goal_time = 0.2
@@ -318,38 +256,37 @@ def setup(self):
self.value2[(np.random.rand(self.n) > 0.5)] = np.nan
self.obj = tm.choice(list('ab'), size=self.n).astype(object)
self.obj[(np.random.randn(self.n) > 0.5)] = np.nan
- self.df = DataFrame({'key1': np.random.randint(0, 500, size=self.n), 'key2': np.random.randint(0, 100, size=self.n), 'dates': self.dates, 'value2': self.value2, 'value3': np.random.randn(self.n), 'ints': np.random.randint(0, 1000, size=self.n), 'obj': self.obj, 'offsets': self.offsets, })
+ self.df = DataFrame({'key1': np.random.randint(0, 500, size=self.n),
+ 'key2': np.random.randint(0, 100, size=self.n),
+ 'dates': self.dates,
+ 'value2': self.value2,
+ 'value3': np.random.randn(self.n),
+ 'ints': np.random.randint(0, 1000, size=self.n),
+ 'obj': self.obj,
+ 'offsets': self.offsets, })
def time_groupby_multi_count(self):
self.df.groupby(['key1', 'key2']).count()
-class groupby_multi_cython(object):
+class groupby_int_count(object):
goal_time = 0.2
def setup(self):
- self.N = 100000
- self.ngroups = 100
- self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
- self.simple_series = Series(np.random.randn(self.N))
- self.key1 = self.df['key1']
-
- def time_groupby_multi_cython(self):
- self.df.groupby(['key1', 'key2']).sum()
+ self.n = 10000
+ self.df = DataFrame({'key1': randint(0, 500, size=self.n),
+ 'key2': randint(0, 100, size=self.n),
+ 'ints': randint(0, 1000, size=self.n),
+ 'ints2': randint(0, 1000, size=self.n), })
- def get_test_data(self, ngroups=100, n=100000):
- self.unique_groups = range(self.ngroups)
- self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
- if (len(self.arr) < n):
- self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
- random.shuffle(self.arr)
- return self.arr
+ def time_groupby_int_count(self):
+ self.df.groupby(['key1', 'key2']).count()
- def f(self):
- self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
+#----------------------------------------------------------------------
+# group with different functions per column
-class groupby_multi_different_functions(object):
+class groupby_agg_multi(object):
goal_time = 0.2
def setup(self):
@@ -358,19 +295,10 @@ def setup(self):
self.df = DataFrame({'key1': self.fac1.take(np.random.randint(0, 3, size=100000)), 'key2': self.fac2.take(np.random.randint(0, 2, size=100000)), 'value1': np.random.randn(100000), 'value2': np.random.randn(100000), 'value3': np.random.randn(100000), })
def time_groupby_multi_different_functions(self):
- self.df.groupby(['key1', 'key2']).agg({'value1': 'mean', 'value2': 'var', 'value3': 'sum', })
-
-
-class groupby_multi_different_numpy_functions(object):
- goal_time = 0.2
-
- def setup(self):
- self.fac1 = np.array(['A', 'B', 'C'], dtype='O')
- self.fac2 = np.array(['one', 'two'], dtype='O')
- self.df = DataFrame({'key1': self.fac1.take(np.random.randint(0, 3, size=100000)), 'key2': self.fac2.take(np.random.randint(0, 2, size=100000)), 'value1': np.random.randn(100000), 'value2': np.random.randn(100000), 'value3': np.random.randn(100000), })
+ self.df.groupby(['key1', 'key2']).agg({'value1': 'mean', 'value2': 'var', 'value3': 'sum'})
def time_groupby_multi_different_numpy_functions(self):
- self.df.groupby(['key1', 'key2']).agg({'value1': np.mean, 'value2': np.var, 'value3': np.sum, })
+ self.df.groupby(['key1', 'key2']).agg({'value1': np.mean, 'value2': np.var, 'value3': np.sum})
class groupby_multi_index(object):
@@ -389,7 +317,7 @@ def time_groupby_multi_index(self):
self.df.groupby(list('abcd')).max()
-class groupby_multi_python(object):
+class groupby_multi(object):
goal_time = 0.2
def setup(self):
@@ -399,9 +327,6 @@ def setup(self):
self.simple_series = Series(np.random.randn(self.N))
self.key1 = self.df['key1']
- def time_groupby_multi_python(self):
- self.df.groupby(['key1', 'key2'])['data1'].agg((lambda x: x.values.sum()))
-
def get_test_data(self, ngroups=100, n=100000):
self.unique_groups = range(self.ngroups)
self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
@@ -413,33 +338,26 @@ def get_test_data(self, ngroups=100, n=100000):
def f(self):
self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
+ def time_groupby_multi_cython(self):
+ self.df.groupby(['key1', 'key2']).sum()
-class groupby_multi_series_op(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 100000
- self.ngroups = 100
- self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
- self.simple_series = Series(np.random.randn(self.N))
- self.key1 = self.df['key1']
+ def time_groupby_multi_python(self):
+ self.df.groupby(['key1', 'key2'])['data1'].agg((lambda x: x.values.sum()))
def time_groupby_multi_series_op(self):
self.df.groupby(['key1', 'key2'])['data1'].agg(np.std)
- def get_test_data(self, ngroups=100, n=100000):
- self.unique_groups = range(self.ngroups)
- self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
- if (len(self.arr) < n):
- self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
- random.shuffle(self.arr)
- return self.arr
+ def time_groupby_series_simple_cython(self):
+ self.simple_series.groupby(self.key1).sum()
- def f(self):
- self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
+ def time_groupby_series_simple_rank(self):
+ self.df.groupby('key1').rank(pct=True)
-class groupby_multi_size(object):
+#----------------------------------------------------------------------
+# size() speed
+
+class groupby_size(object):
goal_time = 0.2
def setup(self):
@@ -451,22 +369,17 @@ def setup(self):
def time_groupby_multi_size(self):
self.df.groupby(['key1', 'key2']).size()
+ def time_groupby_dt_size(self):
+ self.df.groupby(['dates']).size()
-class groupby_ngroups_10000_all(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
+ def time_groupby_dt_timegrouper_size(self):
+ self.df.groupby(TimeGrouper(key='dates', freq='M')).size()
- def time_groupby_ngroups_10000_all(self):
- self.df.groupby('value')['timestamp'].all()
+#----------------------------------------------------------------------
+# groupby with a variable value for ngroups
-class groupby_ngroups_10000_any(object):
+class groupby_ngroups_10000(object):
goal_time = 0.2
def setup(self):
@@ -476,809 +389,101 @@ def setup(self):
self.rng = np.arange(self.ngroups)
self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
- def time_groupby_ngroups_10000_any(self):
- self.df.groupby('value')['timestamp'].any()
-
-
-class groupby_ngroups_10000_count(object):
- goal_time = 0.2
+ def time_all(self):
+ self.df.groupby('value')['timestamp'].all()
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
+ def time_any(self):
+ self.df.groupby('value')['timestamp'].any()
- def time_groupby_ngroups_10000_count(self):
+ def time_count(self):
self.df.groupby('value')['timestamp'].count()
-
-class groupby_ngroups_10000_cumcount(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_cumcount(self):
+ def time_cumcount(self):
self.df.groupby('value')['timestamp'].cumcount()
-
-class groupby_ngroups_10000_cummax(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_cummax(self):
+ def time_cummax(self):
self.df.groupby('value')['timestamp'].cummax()
-
-class groupby_ngroups_10000_cummin(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_cummin(self):
+ def time_cummin(self):
self.df.groupby('value')['timestamp'].cummin()
-
-class groupby_ngroups_10000_cumprod(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_cumprod(self):
+ def time_cumprod(self):
self.df.groupby('value')['timestamp'].cumprod()
-
-class groupby_ngroups_10000_cumsum(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_cumsum(self):
+ def time_cumsum(self):
self.df.groupby('value')['timestamp'].cumsum()
-
-class groupby_ngroups_10000_describe(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_describe(self):
+ def time_describe(self):
self.df.groupby('value')['timestamp'].describe()
-
-class groupby_ngroups_10000_diff(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_diff(self):
+ def time_diff(self):
self.df.groupby('value')['timestamp'].diff()
-
-class groupby_ngroups_10000_first(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_first(self):
+ def time_first(self):
self.df.groupby('value')['timestamp'].first()
-
-class groupby_ngroups_10000_head(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_head(self):
+ def time_head(self):
self.df.groupby('value')['timestamp'].head()
-
-class groupby_ngroups_10000_last(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_last(self):
+ def time_last(self):
self.df.groupby('value')['timestamp'].last()
-
-class groupby_ngroups_10000_mad(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_mad(self):
+ def time_mad(self):
self.df.groupby('value')['timestamp'].mad()
-
-class groupby_ngroups_10000_max(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_max(self):
+ def time_max(self):
self.df.groupby('value')['timestamp'].max()
-
-class groupby_ngroups_10000_mean(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_mean(self):
+ def time_mean(self):
self.df.groupby('value')['timestamp'].mean()
-
-class groupby_ngroups_10000_median(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_median(self):
+ def time_median(self):
self.df.groupby('value')['timestamp'].median()
-
-class groupby_ngroups_10000_min(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_min(self):
+ def time_min(self):
self.df.groupby('value')['timestamp'].min()
-
-class groupby_ngroups_10000_nunique(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_nunique(self):
+ def time_nunique(self):
self.df.groupby('value')['timestamp'].nunique()
+ def time_pct_change(self):
+ self.df.groupby('value')['timestamp'].pct_change()
-class groupby_ngroups_10000_pct_change(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_pct_change(self):
- self.df.groupby('value')['timestamp'].pct_change()
-
-
-class groupby_ngroups_10000_prod(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_prod(self):
- self.df.groupby('value')['timestamp'].prod()
-
-
-class groupby_ngroups_10000_rank(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_rank(self):
- self.df.groupby('value')['timestamp'].rank()
-
-
-class groupby_ngroups_10000_sem(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_sem(self):
- self.df.groupby('value')['timestamp'].sem()
-
-
-class groupby_ngroups_10000_size(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_size(self):
- self.df.groupby('value')['timestamp'].size()
-
-
-class groupby_ngroups_10000_skew(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_skew(self):
- self.df.groupby('value')['timestamp'].skew()
-
-
-class groupby_ngroups_10000_std(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_std(self):
- self.df.groupby('value')['timestamp'].std()
-
-
-class groupby_ngroups_10000_sum(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_sum(self):
- self.df.groupby('value')['timestamp'].sum()
-
-
-class groupby_ngroups_10000_tail(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_tail(self):
- self.df.groupby('value')['timestamp'].tail()
-
-
-class groupby_ngroups_10000_unique(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_unique(self):
- self.df.groupby('value')['timestamp'].unique()
-
-
-class groupby_ngroups_10000_value_counts(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_value_counts(self):
- self.df.groupby('value')['timestamp'].value_counts()
-
-
-class groupby_ngroups_10000_var(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 10000
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_10000_var(self):
- self.df.groupby('value')['timestamp'].var()
-
-
-class groupby_ngroups_100_all(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_all(self):
- self.df.groupby('value')['timestamp'].all()
-
-
-class groupby_ngroups_100_any(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_any(self):
- self.df.groupby('value')['timestamp'].any()
-
-
-class groupby_ngroups_100_count(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_count(self):
- self.df.groupby('value')['timestamp'].count()
-
-
-class groupby_ngroups_100_cumcount(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_cumcount(self):
- self.df.groupby('value')['timestamp'].cumcount()
-
-
-class groupby_ngroups_100_cummax(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_cummax(self):
- self.df.groupby('value')['timestamp'].cummax()
-
-
-class groupby_ngroups_100_cummin(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_cummin(self):
- self.df.groupby('value')['timestamp'].cummin()
-
-
-class groupby_ngroups_100_cumprod(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_cumprod(self):
- self.df.groupby('value')['timestamp'].cumprod()
-
-
-class groupby_ngroups_100_cumsum(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_cumsum(self):
- self.df.groupby('value')['timestamp'].cumsum()
-
-
-class groupby_ngroups_100_describe(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_describe(self):
- self.df.groupby('value')['timestamp'].describe()
-
-
-class groupby_ngroups_100_diff(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_diff(self):
- self.df.groupby('value')['timestamp'].diff()
-
-
-class groupby_ngroups_100_first(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_first(self):
- self.df.groupby('value')['timestamp'].first()
-
-
-class groupby_ngroups_100_head(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_head(self):
- self.df.groupby('value')['timestamp'].head()
-
-
-class groupby_ngroups_100_last(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_last(self):
- self.df.groupby('value')['timestamp'].last()
-
-
-class groupby_ngroups_100_mad(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_mad(self):
- self.df.groupby('value')['timestamp'].mad()
-
-
-class groupby_ngroups_100_max(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_max(self):
- self.df.groupby('value')['timestamp'].max()
-
-
-class groupby_ngroups_100_mean(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_mean(self):
- self.df.groupby('value')['timestamp'].mean()
-
-
-class groupby_ngroups_100_median(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_median(self):
- self.df.groupby('value')['timestamp'].median()
-
-
-class groupby_ngroups_100_min(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_min(self):
- self.df.groupby('value')['timestamp'].min()
-
-
-class groupby_ngroups_100_nunique(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_nunique(self):
- self.df.groupby('value')['timestamp'].nunique()
-
-
-class groupby_ngroups_100_pct_change(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_pct_change(self):
- self.df.groupby('value')['timestamp'].pct_change()
-
-
-class groupby_ngroups_100_prod(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_prod(self):
- self.df.groupby('value')['timestamp'].prod()
-
-
-class groupby_ngroups_100_rank(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_rank(self):
- self.df.groupby('value')['timestamp'].rank()
-
-
-class groupby_ngroups_100_sem(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_sem(self):
- self.df.groupby('value')['timestamp'].sem()
-
-
-class groupby_ngroups_100_size(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_size(self):
- self.df.groupby('value')['timestamp'].size()
-
-
-class groupby_ngroups_100_skew(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_skew(self):
- self.df.groupby('value')['timestamp'].skew()
-
-
-class groupby_ngroups_100_std(object):
- goal_time = 0.2
-
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
-
- def time_groupby_ngroups_100_std(self):
- self.df.groupby('value')['timestamp'].std()
+ def time_prod(self):
+ self.df.groupby('value')['timestamp'].prod()
+ def time_rank(self):
+ self.df.groupby('value')['timestamp'].rank()
-class groupby_ngroups_100_sum(object):
- goal_time = 0.2
+ def time_sem(self):
+ self.df.groupby('value')['timestamp'].sem()
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
+ def time_size(self):
+ self.df.groupby('value')['timestamp'].size()
+
+ def time_skew(self):
+ self.df.groupby('value')['timestamp'].skew()
+
+ def time_std(self):
+ self.df.groupby('value')['timestamp'].std()
- def time_groupby_ngroups_100_sum(self):
+ def time_sum(self):
self.df.groupby('value')['timestamp'].sum()
+ def time_tail(self):
+ self.df.groupby('value')['timestamp'].tail()
-class groupby_ngroups_100_tail(object):
- goal_time = 0.2
+ def time_unique(self):
+ self.df.groupby('value')['timestamp'].unique()
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
+ def time_value_counts(self):
+ self.df.groupby('value')['timestamp'].value_counts()
- def time_groupby_ngroups_100_tail(self):
- self.df.groupby('value')['timestamp'].tail()
+ def time_var(self):
+ self.df.groupby('value')['timestamp'].var()
-class groupby_ngroups_100_unique(object):
+class groupby_ngroups_100(object):
goal_time = 0.2
def setup(self):
@@ -1288,145 +493,127 @@ def setup(self):
self.rng = np.arange(self.ngroups)
self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
- def time_groupby_ngroups_100_unique(self):
- self.df.groupby('value')['timestamp'].unique()
-
+ def time_all(self):
+ self.df.groupby('value')['timestamp'].all()
-class groupby_ngroups_100_value_counts(object):
- goal_time = 0.2
+ def time_any(self):
+ self.df.groupby('value')['timestamp'].any()
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
+ def time_count(self):
+ self.df.groupby('value')['timestamp'].count()
- def time_groupby_ngroups_100_value_counts(self):
- self.df.groupby('value')['timestamp'].value_counts()
+ def time_cumcount(self):
+ self.df.groupby('value')['timestamp'].cumcount()
+ def time_cummax(self):
+ self.df.groupby('value')['timestamp'].cummax()
-class groupby_ngroups_100_var(object):
- goal_time = 0.2
+ def time_cummin(self):
+ self.df.groupby('value')['timestamp'].cummin()
- def setup(self):
- np.random.seed(1234)
- self.ngroups = 100
- self.size = (self.ngroups * 2)
- self.rng = np.arange(self.ngroups)
- self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size)))
+ def time_cumprod(self):
+ self.df.groupby('value')['timestamp'].cumprod()
- def time_groupby_ngroups_100_var(self):
- self.df.groupby('value')['timestamp'].var()
+ def time_cumsum(self):
+ self.df.groupby('value')['timestamp'].cumsum()
+ def time_describe(self):
+ self.df.groupby('value')['timestamp'].describe()
-class groupby_nth_datetimes_any(object):
- goal_time = 0.2
+ def time_diff(self):
+ self.df.groupby('value')['timestamp'].diff()
- def setup(self):
- self.df = DataFrame({'a': date_range('1/1/2011', periods=100000, freq='s'), 'b': range(100000), })
+ def time_first(self):
+ self.df.groupby('value')['timestamp'].first()
- def time_groupby_nth_datetimes_any(self):
- self.df.groupby('b').nth(0, dropna='all')
+ def time_head(self):
+ self.df.groupby('value')['timestamp'].head()
+ def time_last(self):
+ self.df.groupby('value')['timestamp'].last()
-class groupby_nth_datetimes_none(object):
- goal_time = 0.2
+ def time_mad(self):
+ self.df.groupby('value')['timestamp'].mad()
- def setup(self):
- self.df = DataFrame({'a': date_range('1/1/2011', periods=100000, freq='s'), 'b': range(100000), })
+ def time_max(self):
+ self.df.groupby('value')['timestamp'].max()
- def time_groupby_nth_datetimes_none(self):
- self.df.groupby('b').nth(0)
+ def time_mean(self):
+ self.df.groupby('value')['timestamp'].mean()
+ def time_median(self):
+ self.df.groupby('value')['timestamp'].median()
-class groupby_nth_float32_any(object):
- goal_time = 0.2
+ def time_min(self):
+ self.df.groupby('value')['timestamp'].min()
- def setup(self):
- self.labels = np.arange(10000).repeat(10)
- self.data = Series(randn(len(self.labels)))
- self.data[::3] = np.nan
- self.data[1::3] = np.nan
- self.data2 = Series(randn(len(self.labels)), dtype='float32')
- self.data2[::3] = np.nan
- self.data2[1::3] = np.nan
- self.labels = self.labels.take(np.random.permutation(len(self.labels)))
+ def time_nunique(self):
+ self.df.groupby('value')['timestamp'].nunique()
- def time_groupby_nth_float32_any(self):
- self.data2.groupby(self.labels).nth(0, dropna='all')
+ def time_pct_change(self):
+ self.df.groupby('value')['timestamp'].pct_change()
+ def time_prod(self):
+ self.df.groupby('value')['timestamp'].prod()
-class groupby_nth_float32_none(object):
- goal_time = 0.2
+ def time_rank(self):
+ self.df.groupby('value')['timestamp'].rank()
- def setup(self):
- self.labels = np.arange(10000).repeat(10)
- self.data = Series(randn(len(self.labels)))
- self.data[::3] = np.nan
- self.data[1::3] = np.nan
- self.data2 = Series(randn(len(self.labels)), dtype='float32')
- self.data2[::3] = np.nan
- self.data2[1::3] = np.nan
- self.labels = self.labels.take(np.random.permutation(len(self.labels)))
+ def time_sem(self):
+ self.df.groupby('value')['timestamp'].sem()
- def time_groupby_nth_float32_none(self):
- self.data2.groupby(self.labels).nth(0)
+ def time_size(self):
+ self.df.groupby('value')['timestamp'].size()
+ def time_skew(self):
+ self.df.groupby('value')['timestamp'].skew()
-class groupby_nth_float64_any(object):
- goal_time = 0.2
+ def time_std(self):
+ self.df.groupby('value')['timestamp'].std()
- def setup(self):
- self.labels = np.arange(10000).repeat(10)
- self.data = Series(randn(len(self.labels)))
- self.data[::3] = np.nan
- self.data[1::3] = np.nan
- self.data2 = Series(randn(len(self.labels)), dtype='float32')
- self.data2[::3] = np.nan
- self.data2[1::3] = np.nan
- self.labels = self.labels.take(np.random.permutation(len(self.labels)))
+ def time_sum(self):
+ self.df.groupby('value')['timestamp'].sum()
- def time_groupby_nth_float64_any(self):
- self.data.groupby(self.labels).nth(0, dropna='all')
+ def time_tail(self):
+ self.df.groupby('value')['timestamp'].tail()
+ def time_unique(self):
+ self.df.groupby('value')['timestamp'].unique()
-class groupby_nth_float64_none(object):
- goal_time = 0.2
+ def time_value_counts(self):
+ self.df.groupby('value')['timestamp'].value_counts()
- def setup(self):
- self.labels = np.arange(10000).repeat(10)
- self.data = Series(randn(len(self.labels)))
- self.data[::3] = np.nan
- self.data[1::3] = np.nan
- self.data2 = Series(randn(len(self.labels)), dtype='float32')
- self.data2[::3] = np.nan
- self.data2[1::3] = np.nan
- self.labels = self.labels.take(np.random.permutation(len(self.labels)))
+ def time_var(self):
+ self.df.groupby('value')['timestamp'].var()
- def time_groupby_nth_float64_none(self):
- self.data.groupby(self.labels).nth(0)
+#----------------------------------------------------------------------
+# Series.value_counts
-class groupby_nth_object_any(object):
+class series_value_counts(object):
goal_time = 0.2
def setup(self):
- self.df = DataFrame({'a': (['foo'] * 100000), 'b': range(100000), })
+ self.s = Series(np.random.randint(0, 1000, size=100000))
+ self.s2 = self.s.astype(float)
- def time_groupby_nth_object_any(self):
- self.df.groupby('b').nth(0, dropna='any')
+ self.K = 1000
+ self.N = 100000
+ self.uniques = tm.makeStringIndex(self.K).values
+ self.s3 = Series(np.tile(self.uniques, (self.N // self.K)))
+ def time_value_counts_int64(self):
+ self.s.value_counts()
-class groupby_nth_object_none(object):
- goal_time = 0.2
+ def time_value_counts_float64(self):
+ self.s2.value_counts()
- def setup(self):
- self.df = DataFrame({'a': (['foo'] * 100000), 'b': range(100000), })
+ def time_value_counts_strings(self):
+ self.s.value_counts()
- def time_groupby_nth_object_none(self):
- self.df.groupby('b').nth(0)
+#----------------------------------------------------------------------
+# pivot_table
class groupby_pivot_table(object):
goal_time = 0.2
@@ -1442,62 +629,8 @@ def time_groupby_pivot_table(self):
self.df.pivot_table(index='key1', columns=['key2', 'key3'])
-class groupby_series_nth_any(object):
- goal_time = 0.2
-
- def setup(self):
- self.df = DataFrame(np.random.randint(1, 100, (10000, 2)))
-
- def time_groupby_series_nth_any(self):
- self.df[1].groupby(self.df[0]).nth(0, dropna='any')
-
-
-class groupby_series_nth_none(object):
- goal_time = 0.2
-
- def setup(self):
- self.df = DataFrame(np.random.randint(1, 100, (10000, 2)))
-
- def time_groupby_series_nth_none(self):
- self.df[1].groupby(self.df[0]).nth(0)
-
-
-class groupby_series_simple_cython(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 100000
- self.ngroups = 100
- self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
- self.simple_series = Series(np.random.randn(self.N))
- self.key1 = self.df['key1']
-
- def time_groupby_series_simple_cython(self):
- self.df.groupby('key1').rank(pct=True)
-
- def get_test_data(self, ngroups=100, n=100000):
- self.unique_groups = range(self.ngroups)
- self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
- if (len(self.arr) < n):
- self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
- random.shuffle(self.arr)
- return self.arr
-
- def f(self):
- self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
-
-
-class groupby_simple_compress_timing(object):
- goal_time = 0.2
-
- def setup(self):
- self.data = np.random.randn(1000000, 2)
- self.labels = np.random.randint(0, 1000, size=1000000)
- self.df = DataFrame(self.data)
-
- def time_groupby_simple_compress_timing(self):
- self.df.groupby(self.labels).mean()
-
+#----------------------------------------------------------------------
+# Sum booleans #2692
class groupby_sum_booleans(object):
goal_time = 0.2
@@ -1510,6 +643,9 @@ def time_groupby_sum_booleans(self):
self.df.groupby('ii').sum()
+#----------------------------------------------------------------------
+# multi-indexed group sum #9049
+
class groupby_sum_multiindex(object):
goal_time = 0.2
@@ -1521,6 +657,9 @@ def time_groupby_sum_multiindex(self):
self.df.groupby(level=[0, 1]).sum()
+#-------------------------------------------------------------------------------
+# Transform testing
+
class groupby_transform(object):
goal_time = 0.2
@@ -1535,7 +674,9 @@ def setup(self):
self.secid_max = int('F0000000', 16)
self.step = ((self.secid_max - self.secid_min) // (self.n_securities - 1))
self.security_ids = map((lambda x: hex(x)[2:10].upper()), range(self.secid_min, (self.secid_max + 1), self.step))
- self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], labels=[[i for i in range(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id'])
+ self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids],
+ labels=[[i for i in range(self.n_dates) for _ in range(self.n_securities)], (range(self.n_securities) * self.n_dates)],
+ names=['date', 'security_id'])
self.n_data = len(self.data_index)
self.columns = Index(['factor{}'.format(i) for i in range(1, (self.n_columns + 1))])
self.data = DataFrame(np.random.randn(self.n_data, self.n_columns), index=self.data_index, columns=self.columns)
@@ -1550,8 +691,11 @@ def setup(self):
def time_groupby_transform(self):
self.data.groupby(level='security_id').transform(self.f_fillna)
+ def time_groupby_transform_ufunc(self):
+ self.data.groupby(level='date').transform(np.max)
-class groupby_transform_multi_key1(object):
+
+class groupby_transform_multi_key(object):
goal_time = 0.2
def setup(self):
@@ -1628,66 +772,3 @@ def setup(self):
def time_groupby_transform_series2(self):
self.df.groupby('id')['val'].transform(np.mean)
-
-
-class groupby_transform_ufunc(object):
- goal_time = 0.2
-
- def setup(self):
- self.n_dates = 400
- self.n_securities = 250
- self.n_columns = 3
- self.share_na = 0.1
- self.dates = date_range('1997-12-31', periods=self.n_dates, freq='B')
- self.dates = Index(map((lambda x: (((x.year * 10000) + (x.month * 100)) + x.day)), self.dates))
- self.secid_min = int('10000000', 16)
- self.secid_max = int('F0000000', 16)
- self.step = ((self.secid_max - self.secid_min) // (self.n_securities - 1))
- self.security_ids = map((lambda x: hex(x)[2:10].upper()), range(self.secid_min, (self.secid_max + 1), self.step))
- self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], labels=[[i for i in range(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id'])
- self.n_data = len(self.data_index)
- self.columns = Index(['factor{}'.format(i) for i in range(1, (self.n_columns + 1))])
- self.data = DataFrame(np.random.randn(self.n_data, self.n_columns), index=self.data_index, columns=self.columns)
- self.step = int((self.n_data * self.share_na))
- for column_index in range(self.n_columns):
- self.index = column_index
- while (self.index < self.n_data):
- self.data.set_value(self.data_index[self.index], self.columns[column_index], np.nan)
- self.index += self.step
- self.f_fillna = (lambda x: x.fillna(method='pad'))
-
- def time_groupby_transform_ufunc(self):
- self.data.groupby(level='date').transform(np.max)
-
-
-class series_value_counts_float64(object):
- goal_time = 0.2
-
- def setup(self):
- self.s = Series(np.random.randint(0, 1000, size=100000)).astype(float)
-
- def time_series_value_counts_float64(self):
- self.s.value_counts()
-
-
-class series_value_counts_int64(object):
- goal_time = 0.2
-
- def setup(self):
- self.s = Series(np.random.randint(0, 1000, size=100000))
-
- def time_series_value_counts_int64(self):
- self.s.value_counts()
-
-
-class series_value_counts_strings(object):
- goal_time = 0.2
-
- def setup(self):
- self.K = 1000
- self.N = 100000
- self.uniques = tm.makeStringIndex(self.K).values
- self.s = Series(np.tile(self.uniques, (self.N // self.K)))
-
- def time_series_value_counts_strings(self):
- self.s.value_counts()
| Clean-up of the groupby benchmarks.
This is an example of how they can be cleaned up (grouping benchmarks with the same setup in common classes, removing a lot of the setup functions in this way)
| https://api.github.com/repos/pandas-dev/pandas/pulls/10998 | 2015-09-05T11:37:01Z | 2015-09-12T23:46:46Z | 2015-09-12T23:46:46Z | 2015-09-13T00:29:16Z |
DOC: Update pip reference links | diff --git a/README.md b/README.md
index 947dfc5928249..fbac24a34bfd2 100644
--- a/README.md
+++ b/README.md
@@ -221,7 +221,7 @@ cloning the git repo), execute:
python setup.py install
```
-or for installing in [development mode](http://www.pip-installer.org/en/latest/usage.html):
+or for installing in [development mode](https://pip.pypa.io/en/latest/reference/pip_install.html#editable-installs):
```sh
python setup.py develop
@@ -229,7 +229,7 @@ python setup.py develop
Alternatively, you can use `pip` if you want all the dependencies pulled
in automatically (the `-e` option is for installing it in [development
-mode](http://www.pip-installer.org/en/latest/usage.html)):
+mode](https://pip.pypa.io/en/latest/reference/pip_install.html#editable-installs)):
```sh
pip install -e .
| Change to link to https://pip.pypa.io/en/latest/reference/pip_install.html#editable-installs
| https://api.github.com/repos/pandas-dev/pandas/pulls/10997 | 2015-09-05T03:21:30Z | 2015-09-05T10:09:02Z | 2015-09-05T10:09:02Z | 2017-08-25T04:52:33Z |
DOC: Improve reindex examples and docstring | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d3a63f9f5d851..f2a724361df4a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1775,7 +1775,9 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
New labels / index to conform to. Preferably an Index object to
avoid duplicating data
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional
- Method to use for filling holes in reindexed DataFrame:
+ method to use for filling holes in reindexed DataFrame.
+ Please note: this is only applicable to DataFrames/Series with a
+ monotonically increasing/decreasing index.
* default: don't fill gaps
* pad / ffill: propagate last valid observation forward to next valid
* backfill / bfill: use next valid observation to fill gap
@@ -1799,7 +1801,118 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
Examples
--------
- >>> df.reindex(index=[date1, date2, date3], columns=['A', 'B', 'C'])
+
+ Create a dataframe with some fictional data.
+
+ >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
+ >>> df = pd.DataFrame({
+ ... 'http_status': [200,200,404,404,301],
+ ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
+ ... index=index)
+ >>> df
+ http_status response_time
+ Firefox 200 0.04
+ Chrome 200 0.02
+ Safari 404 0.07
+ IE10 404 0.08
+ Konqueror 301 1.00
+
+ Create a new index and reindex the dataframe. By default
+ values in the new index that do not have corresponding
+ records in the dataframe are assigned ``NaN``.
+
+ >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
+ ... 'Chrome']
+ >>> df.reindex(new_index)
+ http_status response_time
+ Safari 404 0.07
+ Iceweasel NaN NaN
+ Comodo Dragon NaN NaN
+ IE10 404 0.08
+ Chrome 200 0.02
+
+ We can fill in the missing values by passing a value to
+ the keyword ``fill_value``. Because the index is not monotonically
+ increasing or decreasing, we cannot use arguments to the keyword
+ ``method`` to fill the ``NaN`` values.
+
+ >>> df.reindex(new_index, fill_value=0)
+ http_status response_time
+ Safari 404 0.07
+ Iceweasel 0 0.00
+ Comodo Dragon 0 0.00
+ IE10 404 0.08
+ Chrome 200 0.02
+
+ >>> df.reindex(new_index, fill_value='missing')
+ http_status response_time
+ Safari 404 0.07
+ Iceweasel missing missing
+ Comodo Dragon missing missing
+ IE10 404 0.08
+ Chrome 200 0.02
+
+ To further illustrate the filling functionality in
+ ``reindex``, we will create a dataframe with a
+ monotonically increasing index (for example, a sequence
+ of dates).
+
+ >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
+ >>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
+ index=date_index)
+ >>> df2
+ prices
+ 2010-01-01 100
+ 2010-01-02 101
+ 2010-01-03 NaN
+ 2010-01-04 100
+ 2010-01-05 89
+ 2010-01-06 88
+
+ Suppose we decide to expand the dataframe to cover a wider
+ date range.
+
+ >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
+ >>> df2.reindex(date_index2)
+ prices
+ 2009-12-29 NaN
+ 2009-12-30 NaN
+ 2009-12-31 NaN
+ 2010-01-01 100
+ 2010-01-02 101
+ 2010-01-03 NaN
+ 2010-01-04 100
+ 2010-01-05 89
+ 2010-01-06 88
+ 2010-01-07 NaN
+
+ The index entries that did not have a value in the original data frame
+ (for example, '2009-12-29') are by default filled with ``NaN``.
+ If desired, we can fill in the missing values using one of several
+ options.
+
+ For example, to backpropagate the last valid value to fill the ``NaN``
+ values, pass ``bfill`` as an argument to the ``method`` keyword.
+
+ >>> df2.reindex(date_index2, method='bfill')
+ prices
+ 2009-12-29 100
+ 2009-12-30 100
+ 2009-12-31 100
+ 2010-01-01 100
+ 2010-01-02 101
+ 2010-01-03 NaN
+ 2010-01-04 100
+ 2010-01-05 89
+ 2010-01-06 88
+ 2010-01-07 NaN
+
+ Please note that the ``NaN`` value present in the original dataframe
+ (at index value 2010-01-03) will not be filled by any of the
+ value propagation schemes. This is because filling while reindexing
+ does not look at dataframe values, but only compares the original and
+ desired indexes. If you do want to fill in the ``NaN`` values present
+ in the original dataframe, use the ``fillna()`` method.
Returns
-------
| Fixes https://github.com/pydata/pandas/issues/10995
Done/waiting for feedback.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10996 | 2015-09-04T22:09:10Z | 2015-10-26T12:04:00Z | 2015-10-26T12:04:00Z | 2015-10-26T12:04:04Z |
Revising the HDF5 docs a bit, added another level to the toc | diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index fb63d0c6d66f1..f4469482ec290 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -107,7 +107,7 @@ See the package overview for more detail about what's in the library.
.. toctree::
- :maxdepth: 3
+ :maxdepth: 4
{% if single -%}
{{ single }}
diff --git a/doc/source/io.rst b/doc/source/io.rst
index ded314229225c..31d0be6151ba4 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -25,15 +25,15 @@
clipdf = DataFrame({'A':[1,2,3],'B':[4,5,6],'C':['p','q','r']},
index=['x','y','z'])
-*******************************
+===============================
IO Tools (Text, CSV, HDF5, ...)
-*******************************
+===============================
The pandas I/O API is a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas``
object.
* :ref:`read_csv<io.read_csv_table>`
- * :ref:`read_excel<io.excel>`
+ * :ref:`read_excel<io.excel_reader>`
* :ref:`read_hdf<io.hdf5>`
* :ref:`read_sql<io.sql>`
* :ref:`read_json<io.json_reader>`
@@ -48,7 +48,7 @@ object.
The corresponding ``writer`` functions are object methods that are accessed like ``df.to_csv()``
* :ref:`to_csv<io.store_in_csv>`
- * :ref:`to_excel<io.excel>`
+ * :ref:`to_excel<io.excel_writer>`
* :ref:`to_hdf<io.hdf5>`
* :ref:`to_sql<io.sql>`
* :ref:`to_json<io.json_writer>`
@@ -279,7 +279,7 @@ columns will come through as object dtype as with the rest of pandas objects.
.. _io.dtypes:
Specifying column data types
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+''''''''''''''''''''''''''''
Starting with v0.10, you can indicate the data type for the whole DataFrame or
individual columns:
@@ -300,10 +300,13 @@ individual columns:
Specifying ``dtype`` with ``engine`` other than 'c' raises a
``ValueError``.
+Naming and Using Columns
+''''''''''''''''''''''''
+
.. _io.headers:
Handling column names
-~~~~~~~~~~~~~~~~~~~~~
++++++++++++++++++++++
A file may or may not have a header row. pandas assumes the first row should be
used as the column names:
@@ -335,7 +338,7 @@ If the header is in a row other than the first, pass the row number to
.. _io.usecols:
Filtering columns (``usecols``)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++++++++++++++++++++++++++++++++
The ``usecols`` argument allows you to select any subset of the columns in a
file, either using the column names or position numbers:
@@ -347,10 +350,14 @@ file, either using the column names or position numbers:
pd.read_csv(StringIO(data), usecols=['b', 'd'])
pd.read_csv(StringIO(data), usecols=[0, 2, 3])
+Comments and Empty Lines
+''''''''''''''''''''''''
+
.. _io.skiplines:
Ignoring line comments and empty lines
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+++++++++++++++++++++++++++++++++++++++
+
If the ``comment`` parameter is specified, then completely commented lines will
be ignored. By default, completely blank lines will be ignored as well. Both of
these are API changes introduced in version 0.15.
@@ -391,10 +398,51 @@ If ``skip_blank_lines=False``, then ``read_csv`` will not ignore blank lines:
print(data)
pd.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
+.. _io.comments:
+
+Comments
+++++++++
+
+Sometimes comments or meta data may be included in a file:
+
+.. ipython:: python
+ :suppress:
+
+ data = ("ID,level,category\n"
+ "Patient1,123000,x # really unpleasant\n"
+ "Patient2,23000,y # wouldn't take his medicine\n"
+ "Patient3,1234018,z # awesome")
+
+ with open('tmp.csv', 'w') as fh:
+ fh.write(data)
+
+.. ipython:: python
+
+ print(open('tmp.csv').read())
+
+By default, the parser includes the comments in the output:
+
+.. ipython:: python
+
+ df = pd.read_csv('tmp.csv')
+ df
+
+We can suppress the comments using the ``comment`` keyword:
+
+.. ipython:: python
+
+ df = pd.read_csv('tmp.csv', comment='#')
+ df
+
+.. ipython:: python
+ :suppress:
+
+ os.remove('tmp.csv')
+
.. _io.unicode:
Dealing with Unicode Data
-~~~~~~~~~~~~~~~~~~~~~~~~~
+'''''''''''''''''''''''''
The ``encoding`` argument should be used for encoded unicode data, which will
result in byte strings being decoded to unicode in the result:
@@ -414,7 +462,7 @@ standard encodings
.. _io.index_col:
Index columns and trailing delimiters
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+'''''''''''''''''''''''''''''''''''''
If a file has one more column of data than the number of column names, the
first column will be used as the DataFrame's row names:
@@ -444,8 +492,11 @@ index column inference and discard the last column, pass ``index_col=False``:
.. _io.parse_dates:
+Date Handling
+'''''''''''''
+
Specifying Date Columns
-~~~~~~~~~~~~~~~~~~~~~~~
++++++++++++++++++++++++
To better facilitate working with datetime data,
:func:`~pandas.io.parsers.read_csv` and :func:`~pandas.io.parsers.read_table`
@@ -545,27 +596,9 @@ data columns:
specify `index_col` as a column label rather then as an index on the resulting frame.
-.. _io.float_precision:
-
-Specifying method for floating-point conversion
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The parameter ``float_precision`` can be specified in order to use
-a specific floating-point converter during parsing with the C engine.
-The options are the ordinary converter, the high-precision converter, and
-the round-trip converter (which is guaranteed to round-trip values after
-writing to a file). For example:
-
-.. ipython:: python
-
- val = '0.3066101993807095471566981359501369297504425048828125'
- data = 'a,b,c\n1,2,{0}'.format(val)
- abs(pd.read_csv(StringIO(data), engine='c', float_precision=None)['c'][0] - float(val))
- abs(pd.read_csv(StringIO(data), engine='c', float_precision='high')['c'][0] - float(val))
- abs(pd.read_csv(StringIO(data), engine='c', float_precision='round_trip')['c'][0] - float(val))
-
-
Date Parsing Functions
-~~~~~~~~~~~~~~~~~~~~~~
+++++++++++++++++++++++
+
Finally, the parser allows you to specify a custom ``date_parser`` function to
take full advantage of the flexibility of the date parsing API:
@@ -618,7 +651,8 @@ a single date rather than the entire array.
Inferring Datetime Format
-~~~~~~~~~~~~~~~~~~~~~~~~~
++++++++++++++++++++++++++
+
If you have ``parse_dates`` enabled for some or all of your columns, and your
datetime strings are all formatted the same way, you may get a large speed
up by setting ``infer_datetime_format=True``. If set, pandas will attempt
@@ -656,7 +690,8 @@ representing December 30th, 2011 at 00:00:00)
os.remove('foo.csv')
International Date Formats
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+++++++++++++++++++++++++++
+
While US date formats tend to be MM/DD/YYYY, many international formats use
DD/MM/YYYY instead. For convenience, a ``dayfirst`` keyword is provided:
@@ -674,10 +709,31 @@ DD/MM/YYYY instead. For convenience, a ``dayfirst`` keyword is provided:
pd.read_csv('tmp.csv', parse_dates=[0])
pd.read_csv('tmp.csv', dayfirst=True, parse_dates=[0])
+.. _io.float_precision:
+
+Specifying method for floating-point conversion
+'''''''''''''''''''''''''''''''''''''''''''''''
+
+The parameter ``float_precision`` can be specified in order to use
+a specific floating-point converter during parsing with the C engine.
+The options are the ordinary converter, the high-precision converter, and
+the round-trip converter (which is guaranteed to round-trip values after
+writing to a file). For example:
+
+.. ipython:: python
+
+ val = '0.3066101993807095471566981359501369297504425048828125'
+ data = 'a,b,c\n1,2,{0}'.format(val)
+ abs(pd.read_csv(StringIO(data), engine='c', float_precision=None)['c'][0] - float(val))
+ abs(pd.read_csv(StringIO(data), engine='c', float_precision='high')['c'][0] - float(val))
+ abs(pd.read_csv(StringIO(data), engine='c', float_precision='round_trip')['c'][0] - float(val))
+
+
.. _io.thousands:
Thousand Separators
-~~~~~~~~~~~~~~~~~~~
+'''''''''''''''''''
+
For large numbers that have been written with a thousands separator, you can
set the ``thousands`` keyword to a string of length 1 so that integers will be parsed
correctly:
@@ -721,7 +777,7 @@ The ``thousands`` keyword allows integers to be parsed correctly
.. _io.na_values:
NA Values
-~~~~~~~~~
+'''''''''
To control which values are parsed as missing values (which are signified by ``NaN``), specifiy a
string in ``na_values``. If you specify a list of strings, then all values in
@@ -762,54 +818,14 @@ the default values, in addition to the string ``"Nope"`` are recognized as ``NaN
.. _io.infinity:
Infinity
-~~~~~~~~
+''''''''
``inf`` like values will be parsed as ``np.inf`` (positive infinity), and ``-inf`` as ``-np.inf`` (negative infinity).
These will ignore the case of the value, meaning ``Inf``, will also be parsed as ``np.inf``.
-.. _io.comments:
-
-Comments
-~~~~~~~~
-Sometimes comments or meta data may be included in a file:
-
-.. ipython:: python
- :suppress:
-
- data = ("ID,level,category\n"
- "Patient1,123000,x # really unpleasant\n"
- "Patient2,23000,y # wouldn't take his medicine\n"
- "Patient3,1234018,z # awesome")
-
- with open('tmp.csv', 'w') as fh:
- fh.write(data)
-
-.. ipython:: python
-
- print(open('tmp.csv').read())
-
-By default, the parse includes the comments in the output:
-
-.. ipython:: python
-
- df = pd.read_csv('tmp.csv')
- df
-
-We can suppress the comments using the ``comment`` keyword:
-
-.. ipython:: python
-
- df = pd.read_csv('tmp.csv', comment='#')
- df
-
-.. ipython:: python
- :suppress:
-
- os.remove('tmp.csv')
-
Returning Series
-~~~~~~~~~~~~~~~~
+''''''''''''''''
Using the ``squeeze`` keyword, the parser will return output with a single column
as a ``Series``:
@@ -842,7 +858,7 @@ as a ``Series``:
.. _io.boolean:
Boolean values
-~~~~~~~~~~~~~~
+''''''''''''''
The common values ``True``, ``False``, ``TRUE``, and ``FALSE`` are all
recognized as boolean. Sometime you would want to recognize some other values
@@ -859,7 +875,7 @@ options:
.. _io.bad_lines:
Handling "bad" lines
-~~~~~~~~~~~~~~~~~~~~
+''''''''''''''''''''
Some files may have malformed lines with too few fields or too many. Lines with
too few fields will have NA values filled in the trailing fields. Lines with
@@ -894,7 +910,7 @@ You can elect to skip bad lines:
.. _io.quoting:
Quoting and Escape Characters
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+'''''''''''''''''''''''''''''
Quotes (and other escape characters) in embedded fields can be handled in any
number of ways. One way is to use backslashes; to properly parse this data, you
@@ -909,7 +925,8 @@ should pass the ``escapechar`` option:
.. _io.fwf:
Files with Fixed Width Columns
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+''''''''''''''''''''''''''''''
+
While ``read_csv`` reads delimited data, the :func:`~pandas.io.parsers.read_fwf`
function works with data files that have known and fixed column widths.
The function parameters to ``read_fwf`` are largely the same as `read_csv` with
@@ -982,8 +999,11 @@ is whitespace).
os.remove('bar.csv')
+Indexes
+'''''''
+
Files with an "implicit" index column
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++++++++++++++++++++++++++++++++++++++
.. ipython:: python
:suppress:
@@ -1021,7 +1041,7 @@ to do as before:
Reading an index with a ``MultiIndex``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+++++++++++++++++++++++++++++++++++++++
.. _io.csv_multiindex:
@@ -1044,7 +1064,7 @@ returned object:
.. _io.multi_index_columns:
Reading columns with a ``MultiIndex``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++++++++++++++++++++++++++++++++++++++
By specifying list of row locations for the ``header`` argument, you
can read in a ``MultiIndex`` for the columns. Specifying non-consecutive
@@ -1088,7 +1108,7 @@ with ``df.to_csv(..., index=False``), then any ``names`` on the columns index wi
.. _io.sniff:
Automatically "sniffing" the delimiter
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+''''''''''''''''''''''''''''''''''''''
``read_csv`` is capable of inferring delimited (not necessarily
comma-separated) files, as pandas uses the :class:`python:csv.Sniffer`
@@ -1109,7 +1129,7 @@ class of the csv module. For this, you have to specify ``sep=None``.
.. _io.chunking:
Iterating through files chunk by chunk
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+''''''''''''''''''''''''''''''''''''''
Suppose you wish to iterate through a (potentially very large) file lazily
rather than reading the entire file into memory, such as the following:
@@ -1148,7 +1168,7 @@ Specifying ``iterator=True`` will also return the ``TextFileReader`` object:
os.remove('tmp2.sv')
Specifying the parser engine
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+''''''''''''''''''''''''''''
Under the hood pandas uses a fast and efficient parser implemented in C as well
as a python implementation which is currently more feature-complete. Where
@@ -1163,10 +1183,13 @@ options include:
Specifying any of the above options will produce a ``ParserWarning`` unless the
python engine is selected explicitly using ``engine='python'``.
+Writing out Data
+''''''''''''''''
+
.. _io.store_in_csv:
Writing to CSV format
-~~~~~~~~~~~~~~~~~~~~~
++++++++++++++++++++++
The Series and DataFrame objects have an instance method ``to_csv`` which
allows storing the contents of the object as a comma-separated-values file. The
@@ -1197,7 +1220,7 @@ function takes a number of arguments. Only the first is required.
- ``date_format``: Format string for datetime objects
Writing a formatted string
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+++++++++++++++++++++++++++
.. _io.formatting:
@@ -1235,7 +1258,7 @@ Read and write ``JSON`` format files and strings.
.. _io.json_writer:
Writing JSON
-~~~~~~~~~~~~
+''''''''''''
A ``Series`` or ``DataFrame`` can be converted to a valid JSON string. Use ``to_json``
with optional parameters:
@@ -1426,7 +1449,7 @@ which can be dealt with by specifying a simple ``default_handler``:
.. _io.json_reader:
Reading JSON
-~~~~~~~~~~~~
+''''''''''''
Reading a JSON string to pandas object can take a number of parameters.
The parser will try to parse a ``DataFrame`` if ``typ`` is not supplied or
@@ -1488,9 +1511,9 @@ be set to ``False`` if you need to preserve string-like numbers (e.g. '1', '2')
Large integer values may be converted to dates if ``convert_dates=True`` and the data and / or column labels appear 'date-like'. The exact threshold depends on the ``date_unit`` specified. 'date-like' means that the column label meets one of the following criteria:
- * it ends with ``'_at'``
+ * it ends with ``'_at'``
* it ends with ``'_time'``
- * it begins with ``'timestamp'``
+ * it begins with ``'timestamp'``
* it is ``'modified'``
* it is ``'date'``
@@ -1631,7 +1654,7 @@ The speedup is less noticeable for smaller datasets:
.. _io.json_normalize:
Normalization
-~~~~~~~~~~~~~
+'''''''''''''
.. versionadded:: 0.13.0
@@ -1665,7 +1688,7 @@ HTML
.. _io.read_html:
Reading HTML Content
-~~~~~~~~~~~~~~~~~~~~~~
+''''''''''''''''''''''
.. warning::
@@ -1820,7 +1843,7 @@ succeeds, the function will return*.
.. _io.html:
Writing to HTML files
-~~~~~~~~~~~~~~~~~~~~~~
+''''''''''''''''''''''
``DataFrame`` objects have an instance method ``to_html`` which renders the
contents of the ``DataFrame`` as an HTML table. The function arguments are as
@@ -1961,8 +1984,10 @@ module and use the same parsing code as the above to convert tabular data into
a DataFrame. See the :ref:`cookbook<cookbook.excel>` for some
advanced strategies
+.. _io.excel_reader:
+
Reading Excel Files
-~~~~~~~~~~~~~~~~~~~
+'''''''''''''''''''
.. versionadded:: 0.16
@@ -2102,8 +2127,13 @@ missing data to recover integer dtype:
cfun = lambda x: int(x) if x else -1
read_excel('path_to_file.xls', 'Sheet1', converters={'MyInts': cfun})
+.. _io.excel_writer:
+
Writing Excel Files
-~~~~~~~~~~~~~~~~~~~
+'''''''''''''''''''
+
+Writing Excel Files to Disk
++++++++++++++++++++++++++++
To write a DataFrame object to a sheet of an Excel file, you can use the
``to_excel`` instance method. The arguments are largely the same as ``to_csv``
@@ -2149,10 +2179,49 @@ one can pass an :class:`~pandas.io.excel.ExcelWriter`.
1``). You can pass ``convert_float=False`` to disable this behavior, which
may give a slight performance improvement.
+.. _io.excel_writing_buffer:
+
+Writing Excel Files to Memory
++++++++++++++++++++++++++++++
+
+.. versionadded:: 0.17
+
+Pandas supports writing Excel files to buffer-like objects such as ``StringIO`` or
+``BytesIO`` using :class:`~pandas.io.excel.ExcelWriter`.
+
+.. code-block:: python
+
+ # Safe import for either Python 2.x or 3.x
+ try:
+ from io import BytesIO
+ except ImportError:
+ from cStringIO import StringIO as BytesIO
+
+ bio = BytesIO()
+
+ # By setting the 'engine' in the ExcelWriter constructor.
+ writer = ExcelWriter(bio, engine='xlsxwriter')
+ df.to_excel(writer, sheet_name='Sheet1')
+
+ # Save the workbook
+ writer.save()
+
+ # Seek to the beginning and read to copy the workbook to a variable in memory
+ bio.seek(0)
+ workbook = bio.read()
+
+.. note::
+
+ ``engine`` is optional but recommended. Setting the engine determines
+ the version of workbook produced. Setting ``engine='xlrd'`` will produce an
+ Excel 2003-format workbook (xls). Using either ``'openpyxl'`` or
+ ``'xlsxwriter'`` will produce an Excel 2007-format workbook (xlsx). If
+ omitted, an Excel 2007-formatted workbook is produced.
+
.. _io.excel.writers:
Excel writer engines
-~~~~~~~~~~~~~~~~~~~~
+''''''''''''''''''''
.. versionadded:: 0.13
@@ -2194,45 +2263,6 @@ argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are:
df.to_excel('path_to_file.xlsx', sheet_name='Sheet1')
-.. _io.excel_writing_buffer:
-
-Writing Excel Files to Memory
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. versionadded:: 0.17
-
-Pandas supports writing Excel files to buffer-like objects such as ``StringIO`` or
-``BytesIO`` using :class:`~pandas.io.excel.ExcelWriter`.
-
-.. code-block:: python
-
- # Safe import for either Python 2.x or 3.x
- try:
- from io import BytesIO
- except ImportError:
- from cStringIO import StringIO as BytesIO
-
- bio = BytesIO()
-
- # By setting the 'engine' in the ExcelWriter constructor.
- writer = ExcelWriter(bio, engine='xlsxwriter')
- df.to_excel(writer, sheet_name='Sheet1')
-
- # Save the workbook
- writer.save()
-
- # Seek to the beginning and read to copy the workbook to a variable in memory
- bio.seek(0)
- workbook = bio.read()
-
-.. note::
-
- ``engine`` is optional but recommended. Setting the engine determines
- the version of workbook produced. Setting ``engine='xlrd'`` will produce an
- Excel 2003-format workbook (xls). Using either ``'openpyxl'`` or
- ``'xlsxwriter'`` will produce an Excel 2007-format workbook (xlsx). If
- omitted, an Excel 2007-formatted workbook is produced.
-
.. _io.clipboard:
Clipboard
@@ -2387,7 +2417,7 @@ pandas objects.
os.remove('foo2.msg')
Read/Write API
-~~~~~~~~~~~~~~
+''''''''''''''
Msgpacks can also be read from and written to strings.
@@ -2502,7 +2532,7 @@ Closing a Store, Context Manager
Read/Write API
-~~~~~~~~~~~~~~
+''''''''''''''
``HDFStore`` supports an top-level API using ``read_hdf`` for reading and ``to_hdf`` for writing,
similar to how ``read_csv`` and ``to_csv`` work. (new in 0.11.0)
@@ -2581,7 +2611,7 @@ This is also true for the major axis of a ``Panel``:
.. _io.hdf5-fixed:
Fixed Format
-~~~~~~~~~~~~
+''''''''''''
.. note::
@@ -2610,7 +2640,7 @@ This format is specified by default when using ``put`` or ``to_hdf`` or by ``for
.. _io.hdf5-table:
Table Format
-~~~~~~~~~~~~
+''''''''''''
``HDFStore`` supports another ``PyTables`` format on disk, the ``table``
format. Conceptually a ``table`` is shaped very much like a DataFrame,
@@ -2654,7 +2684,7 @@ enable ``put/append/to_hdf`` to by default store in the ``table`` format.
.. _io.hdf5-keys:
Hierarchical Keys
-~~~~~~~~~~~~~~~~~
+'''''''''''''''''
Keys to a store can be specified as a string. These can be in a
hierarchical path-name like format (e.g. ``foo/bar/bah``), which will
@@ -2679,8 +2709,11 @@ everything in the sub-store and BELOW, so be *careful*.
.. _io.hdf5-types:
+Storing Types
+'''''''''''''
+
Storing Mixed Types in a Table
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+++++++++++++++++++++++++++++++
Storing mixed-dtype data is supported. Strings are stored as a
fixed-width using the maximum size of the appended column. Subsequent
@@ -2714,7 +2747,7 @@ defaults to `nan`.
store.root.df_mixed.table
Storing Multi-Index DataFrames
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+++++++++++++++++++++++++++++++
Storing multi-index dataframes as tables is very similar to
storing/selecting from homogeneous index DataFrames.
@@ -2739,8 +2772,11 @@ storing/selecting from homogeneous index DataFrames.
.. _io.hdf5-query:
+Querying
+''''''''
+
Querying a Table
-~~~~~~~~~~~~~~~~
+++++++++++++++++
.. warning::
@@ -2755,20 +2791,20 @@ data.
A query is specified using the ``Term`` class under the hood, as a boolean expression.
- - ``index`` and ``columns`` are supported indexers of a DataFrame
- - ``major_axis``, ``minor_axis``, and ``items`` are supported indexers of
- the Panel
- - if ``data_columns`` are specified, these can be used as additional indexers
+- ``index`` and ``columns`` are supported indexers of a DataFrame
+- ``major_axis``, ``minor_axis``, and ``items`` are supported indexers of
+ the Panel
+- if ``data_columns`` are specified, these can be used as additional indexers
Valid comparison operators are:
- - ``=, ==, !=, >, >=, <, <=``
+``=, ==, !=, >, >=, <, <=``
Valid boolean expressions are combined with:
- - ``|`` : or
- - ``&`` : and
- - ``(`` and ``)`` : for grouping
+- ``|`` : or
+- ``&`` : and
+- ``(`` and ``)`` : for grouping
These rules are similar to how boolean expressions are used in pandas for indexing.
@@ -2781,28 +2817,28 @@ These rules are similar to how boolean expressions are used in pandas for indexi
The following are valid expressions:
- - ``'index>=date'``
- - ``"columns=['A', 'D']"``
- - ``"columns in ['A', 'D']"``
- - ``'columns=A'``
- - ``'columns==A'``
- - ``"~(columns=['A','B'])"``
- - ``'index>df.index[3] & string="bar"'``
- - ``'(index>df.index[3] & index<=df.index[6]) | string="bar"'``
- - ``"ts>=Timestamp('2012-02-01')"``
- - ``"major_axis>=20130101"``
+- ``'index>=date'``
+- ``"columns=['A', 'D']"``
+- ``"columns in ['A', 'D']"``
+- ``'columns=A'``
+- ``'columns==A'``
+- ``"~(columns=['A','B'])"``
+- ``'index>df.index[3] & string="bar"'``
+- ``'(index>df.index[3] & index<=df.index[6]) | string="bar"'``
+- ``"ts>=Timestamp('2012-02-01')"``
+- ``"major_axis>=20130101"``
The ``indexers`` are on the left-hand side of the sub-expression:
- - ``columns``, ``major_axis``, ``ts``
+``columns``, ``major_axis``, ``ts``
The right-hand side of the sub-expression (after a comparison operator) can be:
- - functions that will be evaluated, e.g. ``Timestamp('2012-02-01')``
- - strings, e.g. ``"bar"``
- - date-like, e.g. ``20130101``, or ``"20130101"``
- - lists, e.g. ``"['A','B']"``
- - variables that are defined in the local names space, e.g. ``date``
+- functions that will be evaluated, e.g. ``Timestamp('2012-02-01')``
+- strings, e.g. ``"bar"``
+- date-like, e.g. ``20130101``, or ``"20130101"``
+- lists, e.g. ``"['A','B']"``
+- variables that are defined in the local names space, e.g. ``date``
.. note::
@@ -2893,7 +2929,8 @@ space. These are in terms of the total number of rows in a table.
.. _io.hdf5-timedelta:
-**Using timedelta64[ns]**
+Using timedelta64[ns]
++++++++++++++++++++++
.. versionadded:: 0.13
@@ -2901,10 +2938,6 @@ Beginning in 0.13.0, you can store and query using the ``timedelta64[ns]`` type.
specified in the format: ``<float>(<unit>)``, where float may be signed (and fractional), and unit can be
``D,s,ms,us,ns`` for the timedelta. Here's an example:
-.. warning::
-
- This requires ``numpy >= 1.7``
-
.. ipython:: python
from datetime import timedelta
@@ -2915,7 +2948,7 @@ specified in the format: ``<float>(<unit>)``, where float may be signed (and fra
store.select('dftd',"C<'-3.5D'")
Indexing
-~~~~~~~~
+++++++++
You can create/modify an index for a table with ``create_table_index``
after data is already in the table (after and ``append/put``
@@ -2943,7 +2976,7 @@ indexed dimension as the ``where``.
See `here <http://stackoverflow.com/questions/17893370/ptrepack-sortby-needs-full-index>`__ for how to create a completely-sorted-index (CSI) on an existing store.
Query via Data Columns
-~~~~~~~~~~~~~~~~~~~~~~
+++++++++++++++++++++++
You can designate (and index) certain columns that you want to be able
to perform queries (other than the `indexable` columns, which you can
@@ -2983,7 +3016,7 @@ append/put operation (Of course you can simply read in the data and
create a new table!)
Iterator
-~~~~~~~~
+++++++++
Starting in ``0.11.0``, you can pass, ``iterator=True`` or ``chunksize=number_in_a_chunk``
to ``select`` and ``select_as_multiple`` to return an iterator on the results.
@@ -3029,9 +3062,10 @@ chunks.
print store.select('dfeq',where=c)
Advanced Queries
-~~~~~~~~~~~~~~~~
+++++++++++++++++
-**Select a Single Column**
+Select a Single Column
+^^^^^^^^^^^^^^^^^^^^^^
To retrieve a single indexable or data column, use the
method ``select_column``. This will, for example, enable you to get the index
@@ -3045,7 +3079,8 @@ These do not currently accept the ``where`` selector.
.. _io.hdf5-selecting_coordinates:
-**Selecting coordinates**
+Selecting coordinates
+^^^^^^^^^^^^^^^^^^^^^
Sometimes you want to get the coordinates (a.k.a the index locations) of your query. This returns an
``Int64Index`` of the resulting locations. These coordinates can also be passed to subsequent
@@ -3061,7 +3096,8 @@ Sometimes you want to get the coordinates (a.k.a the index locations) of your qu
.. _io.hdf5-where_mask:
-**Selecting using a where mask**
+Selecting using a where mask
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Sometime your query can involve creating a list of rows to select. Usually this ``mask`` would
be a resulting ``index`` from an indexing operation. This example selects the months of
@@ -3075,7 +3111,8 @@ a datetimeindex which are 5.
where = c[DatetimeIndex(c).month==5].index
store.select('df_mask',where=where)
-**Storer Object**
+Storer Object
+^^^^^^^^^^^^^
If you want to inspect the stored object, retrieve via
``get_storer``. You could use this programmatically to say get the number
@@ -3087,7 +3124,7 @@ of rows in an object.
Multiple Table Queries
-~~~~~~~~~~~~~~~~~~~~~~
+++++++++++++++++++++++
New in 0.10.1 are the methods ``append_to_multiple`` and
``select_as_multiple``, that can perform appending/selecting from
@@ -3136,7 +3173,7 @@ results.
Delete from a Table
-~~~~~~~~~~~~~~~~~~~
+'''''''''''''''''''
You can delete from a table selectively by specifying a ``where``. In
deleting rows, it is important to understand the ``PyTables`` deletes
@@ -3152,15 +3189,15 @@ simple use case. You store panel-type data, with dates in the
``major_axis`` and ids in the ``minor_axis``. The data is then
interleaved like this:
- - date_1
- - id_1
- - id_2
- - .
- - id_n
- - date_2
- - id_1
- - .
- - id_n
+- date_1
+ - id_1
+ - id_2
+ - .
+ - id_n
+- date_2
+ - id_1
+ - .
+ - id_n
It should be clear that a delete operation on the ``major_axis`` will be
fairly quick, as one chunk is removed, then the following data moved. On
@@ -3174,21 +3211,29 @@ the table using a ``where`` that selects all but the missing data.
store.remove('wp', 'major_axis>20000102' )
store.select('wp')
-Please note that HDF5 **DOES NOT RECLAIM SPACE** in the h5 files
-automatically. Thus, repeatedly deleting (or removing nodes) and adding
-again **WILL TEND TO INCREASE THE FILE SIZE**. To *clean* the file, use
-``ptrepack`` (see below).
+.. warning::
+
+ Please note that HDF5 **DOES NOT RECLAIM SPACE** in the h5 files
+ automatically. Thus, repeatedly deleting (or removing nodes) and adding
+ again **WILL TEND TO INCREASE THE FILE SIZE**. To *clean* the file, use
+ :ref:`ptrepack <io.hdf5-ptrepack>`
+
+.. _io.hdf5-notes:
+
+Notes & Caveats
+'''''''''''''''
+
Compression
-~~~~~~~~~~~
++++++++++++
``PyTables`` allows the stored data to be compressed. This applies to
all kinds of stores, not just tables.
- - Pass ``complevel=int`` for a compression level (1-9, with 0 being no
- compression, and the default)
- - Pass ``complib=lib`` where lib is any of ``zlib, bzip2, lzo, blosc`` for
- whichever compression library you prefer.
+- Pass ``complevel=int`` for a compression level (1-9, with 0 being no
+ compression, and the default)
+- Pass ``complib=lib`` where lib is any of ``zlib, bzip2, lzo, blosc`` for
+ whichever compression library you prefer.
``HDFStore`` will use the file based compression scheme if no overriding
``complib`` or ``complevel`` options are provided. ``blosc`` offers very
@@ -3197,14 +3242,21 @@ may not be installed (by Python) by default.
Compression for all objects within the file
- - ``store_compressed = HDFStore('store_compressed.h5', complevel=9, complib='blosc')``
+.. code-block:: python
+
+ store_compressed = HDFStore('store_compressed.h5', complevel=9, complib='blosc')
Or on-the-fly compression (this only applies to tables). You can turn
off file compression for a specific table by passing ``complevel=0``
- - ``store.append('df', df, complib='zlib', complevel=5)``
+.. code-block:: python
+
+ store.append('df', df, complib='zlib', complevel=5)
-**ptrepack**
+.. _io.hdf5-ptrepack:
+
+ptrepack
+++++++++
``PyTables`` offers better write performance when tables are compressed after
they are written, as opposed to turning on compression at the very
@@ -3212,42 +3264,39 @@ beginning. You can use the supplied ``PyTables`` utility
``ptrepack``. In addition, ``ptrepack`` can change compression levels
after the fact.
- - ``ptrepack --chunkshape=auto --propindexes --complevel=9 --complib=blosc in.h5 out.h5``
+.. code-block:: console
+
+ ptrepack --chunkshape=auto --propindexes --complevel=9 --complib=blosc in.h5 out.h5
Furthermore ``ptrepack in.h5 out.h5`` will *repack* the file to allow
you to reuse previously deleted space. Alternatively, one can simply
remove the file and write again, or use the ``copy`` method.
-.. _io.hdf5-notes:
+.. _io.hdf5-caveats:
-Notes & Caveats
-~~~~~~~~~~~~~~~
-
- - Once a ``table`` is created its items (Panel) / columns (DataFrame)
- are fixed; only exactly the same columns can be appended
- - If a row has ``np.nan`` for **EVERY COLUMN** (having a ``nan``
- in a string, or a ``NaT`` in a datetime-like column counts as having
- a value), then those rows **WILL BE DROPPED IMPLICITLY**. This limitation
- *may* be addressed in the future.
- - ``HDFStore`` is **not-threadsafe for writing**. The underlying
- ``PyTables`` only supports concurrent reads (via threading or
- processes). If you need reading and writing *at the same time*, you
- need to serialize these operations in a single thread in a single
- process. You will corrupt your data otherwise. See the (:issue:`2397`) for more information.
- - If you use locks to manage write access between multiple processes, you
- may want to use :py:func:`~os.fsync` before releasing write locks. For
- convenience you can use ``store.flush(fsync=True)`` to do this for you.
- - ``PyTables`` only supports fixed-width string columns in
- ``tables``. The sizes of a string based indexing column
- (e.g. *columns* or *minor_axis*) are determined as the maximum size
- of the elements in that axis or by passing the parameter
- - Be aware that timezones (e.g., ``pytz.timezone('US/Eastern')``)
- are not necessarily equal across timezone versions. So if data is
- localized to a specific timezone in the HDFStore using one version
- of a timezone library and that data is updated with another version, the data
- will be converted to UTC since these timezones are not considered
- equal. Either use the same version of timezone library or use ``tz_convert`` with
- the updated timezone definition.
+Caveats
++++++++
+
+.. warning::
+
+ ``HDFStore`` is **not-threadsafe for writing**. The underlying
+ ``PyTables`` only supports concurrent reads (via threading or
+ processes). If you need reading and writing *at the same time*, you
+ need to serialize these operations in a single thread in a single
+ process. You will corrupt your data otherwise. See the (:issue:`2397`) for more information.
+
+- If you use locks to manage write access between multiple processes, you
+ may want to use :py:func:`~os.fsync` before releasing write locks. For
+ convenience you can use ``store.flush(fsync=True)`` to do this for you.
+- Once a ``table`` is created its items (Panel) / columns (DataFrame)
+ are fixed; only exactly the same columns can be appended
+- Be aware that timezones (e.g., ``pytz.timezone('US/Eastern')``)
+ are not necessarily equal across timezone versions. So if data is
+ localized to a specific timezone in the HDFStore using one version
+ of a timezone library and that data is updated with another version, the data
+ will be converted to UTC since these timezones are not considered
+ equal. Either use the same version of timezone library or use ``tz_convert`` with
+ the updated timezone definition.
.. warning::
@@ -3258,8 +3307,10 @@ Notes & Caveats
Other identifiers cannot be used in a ``where`` clause
and are generally a bad idea.
+.. _io.hdf5-data_types:
+
DataTypes
-~~~~~~~~~
+'''''''''
``HDFStore`` will map an object dtype to the ``PyTables`` underlying
dtype. This means the following types are known to work:
@@ -3281,7 +3332,7 @@ object : ``strings`` ``np.nan``
.. _io.hdf5-categorical:
Categorical Data
-~~~~~~~~~~~~~~~~
+++++++++++++++++
.. versionadded:: 0.15.2
@@ -3326,7 +3377,7 @@ stored in a more efficient manner.
String Columns
-~~~~~~~~~~~~~~
+++++++++++++++
**min_itemsize**
@@ -3345,7 +3396,7 @@ Starting in 0.11.0, passing a ``min_itemsize`` dict will cause all passed column
.. note::
- If you are not passing any *data_columns*, then the min_itemsize will be the maximum of the length of any string passed
+ If you are not passing any ``data_columns``, then the ``min_itemsize`` will be the maximum of the length of any string passed
.. ipython:: python
@@ -3381,7 +3432,7 @@ You could inadvertently turn an actual ``nan`` value into a missing value.
.. _io.external_compatibility:
External Compatibility
-~~~~~~~~~~~~~~~~~~~~~~
+''''''''''''''''''''''
``HDFStore`` writes ``table`` format objects in specific formats suitable for
producing loss-less round trips to pandas objects. For external
@@ -3470,7 +3521,7 @@ Now you can import the ``DataFrame`` into R:
single HDF5 file.
Backwards Compatibility
-~~~~~~~~~~~~~~~~~~~~~~~
+'''''''''''''''''''''''
0.10.1 of ``HDFStore`` can read tables created in a prior version of pandas,
however query terms using the
@@ -3508,7 +3559,7 @@ number of options, please see the docstring.
Performance
-~~~~~~~~~~~
+'''''''''''
- ``tables`` format come with a writing performance penalty as compared to
``fixed`` stores. The benefit is the ability to append/delete and
@@ -3531,7 +3582,7 @@ Performance
for more information and some solutions.
Experimental
-~~~~~~~~~~~~
+''''''''''''
HDFStore supports ``Panel4D`` storage.
@@ -3625,7 +3676,7 @@ If you want to manage your own connections you can pass one of those instead:
data = pd.read_sql_table('data', conn)
Writing DataFrames
-~~~~~~~~~~~~~~~~~~
+''''''''''''''''''
Assuming the following data is in a DataFrame ``data``, we can insert it into
the database using :func:`~pandas.DataFrame.to_sql`.
@@ -3699,7 +3750,7 @@ default ``Text`` type for string columns:
a categorical.
Reading Tables
-~~~~~~~~~~~~~~
+''''''''''''''
:func:`~pandas.read_sql_table` will read a database table given the
table name and optionally a subset of columns to read.
@@ -3739,7 +3790,7 @@ to pass to :func:`pandas.to_datetime`:
You can check if a table exists using :func:`~pandas.io.sql.has_table`
Schema support
-~~~~~~~~~~~~~~
+''''''''''''''
.. versionadded:: 0.15.0
@@ -3754,7 +3805,7 @@ have schema's). For example:
pd.read_sql_table('table', engine, schema='other_schema')
Querying
-~~~~~~~~
+''''''''
You can query using raw SQL in the :func:`~pandas.read_sql_query` function.
In this case you must use the SQL variant appropriate for your database.
@@ -3798,7 +3849,7 @@ variant appropriate for your database.
Engine connection examples
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+''''''''''''''''''''''''''
To connect with SQLAlchemy you use the :func:`create_engine` function to create an engine
object from database URI. You only need to create the engine once per database you are
@@ -3827,7 +3878,7 @@ For more information see the examples the SQLAlchemy `documentation <http://docs
Sqlite fallback
-~~~~~~~~~~~~~~~
+'''''''''''''''
The use of sqlite is supported without using SQLAlchemy.
This mode requires a Python database adapter which respect the `Python
@@ -3951,7 +4002,7 @@ Stata Format
.. _io.stata_writer:
Writing to Stata format
-~~~~~~~~~~~~~~~~~~~~~~~
+'''''''''''''''''''''''
The method :func:`~pandas.core.frame.DataFrame.to_stata` will write a DataFrame
into a .dta file. The format version of this file is always 115 (Stata 12).
@@ -4001,7 +4052,7 @@ outside of this range, the variable is cast to ``int16``.
.. _io.stata_reader:
Reading from Stata format
-~~~~~~~~~~~~~~~~~~~~~~~~~
+'''''''''''''''''''''''''
The top-level function ``read_stata`` will read a dta file and return
either a DataFrame or a :class:`~pandas.io.stata.StataReader` that can
@@ -4068,7 +4119,7 @@ values will have ``object`` data type.
.. _io.stata-categorical:
Categorical Data
-~~~~~~~~~~~~~~~~
+++++++++++++++++
.. versionadded:: 0.15.2
@@ -4121,7 +4172,7 @@ cleanly to its tabular data model. For reading and writing other file formats
into and from pandas, we recommend these packages from the broader community.
netCDF
-~~~~~~
+''''''
xray_ provides data structures inspired by the pandas DataFrame for working
with multi-dimensional datasets, with a focus on the netCDF file format and
@@ -4131,12 +4182,14 @@ easy conversion to and from pandas.
.. _io.sas:
+.. _io.sas_reader:
+
SAS Format
----------
.. versionadded:: 0.17.0
-The top-level function :function:`read_sas` currently can read (but
+The top-level function :func:`read_sas` currently can read (but
not write) SAS xport (.XPT) format files. Pandas cannot currently
handle SAS7BDAT files.
diff --git a/doc/source/themes/nature_with_gtoc/static/nature.css_t b/doc/source/themes/nature_with_gtoc/static/nature.css_t
index 61b0e2cce5e5a..33644101eb425 100644
--- a/doc/source/themes/nature_with_gtoc/static/nature.css_t
+++ b/doc/source/themes/nature_with_gtoc/static/nature.css_t
@@ -31,7 +31,7 @@ div.bodywrapper {
/* ugly hack, probably not attractive with other font size for re*/
margin: 0 0 0 {{ theme_sidebarwidth|toint}}px;
min-width: 540px;
- max-width: 720px;
+ max-width: 800px;
}
diff --git a/doc/source/themes/nature_with_gtoc/theme.conf b/doc/source/themes/nature_with_gtoc/theme.conf
index 1cc40044646bb..290a07bde8806 100644
--- a/doc/source/themes/nature_with_gtoc/theme.conf
+++ b/doc/source/themes/nature_with_gtoc/theme.conf
@@ -2,3 +2,6 @@
inherit = basic
stylesheet = nature.css
pygments_style = tango
+
+[options]
+sidebarwidth = 270
| - makes the toc 4 levels
- increases the width of the side bar a bit
- increases the overall width just a bit


| https://api.github.com/repos/pandas-dev/pandas/pulls/10992 | 2015-09-04T19:16:02Z | 2015-09-05T14:19:49Z | 2015-09-05T14:19:49Z | 2015-09-05T14:19:49Z |
COMPAT/TST: fix group_info dtype issues, xref #10981 | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index f42825a11933b..7a5770d3968ec 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1793,13 +1793,13 @@ def indices(self):
@cache_readonly
def group_info(self):
ngroups = self.ngroups
- obs_group_ids = np.arange(ngroups)
+ obs_group_ids = np.arange(ngroups, dtype='int64')
rep = np.diff(np.r_[0, self.bins])
if ngroups == len(self.bins):
- comp_ids = np.repeat(np.arange(ngroups), rep)
+ comp_ids = np.repeat(np.arange(ngroups, dtype='int64'), rep)
else:
- comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
+ comp_ids = np.repeat(np.r_[-1, np.arange(ngroups, dtype='int64')], rep)
return comp_ids, obs_group_ids, ngroups
@@ -2552,8 +2552,8 @@ def nunique(self, dropna=True):
# group boundries are where group ids change
# unique observations are where sorted values change
- idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
- inc = np.r_[1, val[1:] != val[:-1]]
+ idx = com._ensure_int64(np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]])
+ inc = com._ensure_int64(np.r_[1, val[1:] != val[:-1]])
# 1st item of each group is a new unique observation
mask = isnull(val)
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 49d344631e4b9..ec03d558e45b8 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -919,7 +919,7 @@ def test_resample_timegrouper(self):
def test_resample_group_info(self): # GH10914
for n, k in product((10000, 100000), (10, 100, 1000)):
dr = date_range(start='2015-08-27', periods=n // 10, freq='T')
- ts = Series(np.random.randint(0, n // k, n),
+ ts = Series(np.random.randint(0, n // k, n).astype('int64'),
index=np.random.choice(dr, n))
left = ts.resample('30T', how='nunique')
@@ -1585,7 +1585,7 @@ def test_aggregate_with_nat(self):
# check TimeGrouper's aggregation is identical as normal groupby
n = 20
- data = np.random.randn(n, 4)
+ data = np.random.randn(n, 4).astype('int64')
normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
normal_df['key'] = [1, 2, np.nan, 4, 5] * 4
| closes #10981
| https://api.github.com/repos/pandas-dev/pandas/pulls/10988 | 2015-09-04T14:09:13Z | 2015-09-04T15:34:30Z | 2015-09-04T15:34:30Z | 2015-09-13T20:00:25Z |
TST: test_nanops turns off bottneck for all tests after | diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index bb6cb5a444dd9..3615cc3dc8ad8 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -842,6 +842,44 @@ def no_nans(x):
_consistency_data = _create_consistency_data()
class TestMomentsConsistency(Base):
+ base_functions = [
+ (lambda v: Series(v).count(), None, 'count'),
+ (lambda v: Series(v).max(), None, 'max'),
+ (lambda v: Series(v).min(), None, 'min'),
+ (lambda v: Series(v).sum(), None, 'sum'),
+ (lambda v: Series(v).mean(), None, 'mean'),
+ (lambda v: Series(v).std(), 1, 'std'),
+ (lambda v: Series(v).cov(Series(v)), None, 'cov'),
+ (lambda v: Series(v).corr(Series(v)), None, 'corr'),
+ (lambda v: Series(v).var(), 1, 'var'),
+ #(lambda v: Series(v).skew(), 3, 'skew'), # restore once GH 8086 is fixed
+ #(lambda v: Series(v).kurt(), 4, 'kurt'), # restore once GH 8086 is fixed
+ #(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods, 'quantile'),
+ # lambda v: Series(v).quantile(0.3), None, 'quantile'), # restore once GH 8084 is fixed
+ (lambda v: Series(v).median(), None ,'median'),
+ (np.nanmax, 1, 'max'),
+ (np.nanmin, 1, 'min'),
+ (np.nansum, 1, 'sum'),
+ ]
+ if np.__version__ >= LooseVersion('1.8.0'):
+ base_functions += [
+ (np.nanmean, 1, 'mean'),
+ (lambda v: np.nanstd(v, ddof=1), 1 ,'std'),
+ (lambda v: np.nanvar(v, ddof=1), 1 ,'var'),
+ ]
+ if np.__version__ >= LooseVersion('1.9.0'):
+ base_functions += [
+ (np.nanmedian, 1, 'median'),
+ ]
+ no_nan_functions = [
+ (np.max, None, 'max'),
+ (np.min, None, 'min'),
+ (np.sum, None, 'sum'),
+ (np.mean, None, 'mean'),
+ (lambda v: np.std(v, ddof=1), 1 ,'std'),
+ (lambda v: np.var(v, ddof=1), 1 ,'var'),
+ (np.median, None, 'median'),
+ ]
def _create_data(self):
super(TestMomentsConsistency, self)._create_data()
@@ -877,9 +915,11 @@ def _non_null_values(x):
# self.assertTrue(_non_null_values(corr_x_x).issubset(set([1.]))) # restore once rolling_cov(x, x) is identically equal to var(x)
if is_constant:
+ exp = x.max() if isinstance(x, Series) else x.max().max()
+
# check mean of constant series
expected = x * np.nan
- expected[count_x >= max(min_periods, 1)] = x.max().max()
+ expected[count_x >= max(min_periods, 1)] = exp
assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
@@ -1030,44 +1070,6 @@ def _ewma(s, com, min_periods, adjust, ignore_na):
@slow
def test_expanding_consistency(self):
- base_functions = [
- (mom.expanding_count, lambda v: Series(v).count(), None),
- (mom.expanding_max, lambda v: Series(v).max(), None),
- (mom.expanding_min, lambda v: Series(v).min(), None),
- (mom.expanding_sum, lambda v: Series(v).sum(), None),
- (mom.expanding_mean, lambda v: Series(v).mean(), None),
- (mom.expanding_std, lambda v: Series(v).std(), 1),
- (mom.expanding_cov, lambda v: Series(v).cov(Series(v)), None),
- (mom.expanding_corr, lambda v: Series(v).corr(Series(v)), None),
- (mom.expanding_var, lambda v: Series(v).var(), 1),
- #(mom.expanding_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
- #(mom.expanding_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
- #(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods),
- # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
- (mom.expanding_median, lambda v: Series(v).median(), None),
- (mom.expanding_max, np.nanmax, 1),
- (mom.expanding_min, np.nanmin, 1),
- (mom.expanding_sum, np.nansum, 1),
- ]
- if np.__version__ >= LooseVersion('1.8.0'):
- base_functions += [
- (mom.expanding_mean, np.nanmean, 1),
- (mom.expanding_std, lambda v: np.nanstd(v, ddof=1), 1),
- (mom.expanding_var, lambda v: np.nanvar(v, ddof=1), 1),
- ]
- if np.__version__ >= LooseVersion('1.9.0'):
- base_functions += [
- (mom.expanding_median, np.nanmedian, 1),
- ]
- no_nan_functions = [
- (mom.expanding_max, np.max, None),
- (mom.expanding_min, np.min, None),
- (mom.expanding_sum, np.sum, None),
- (mom.expanding_mean, np.mean, None),
- (mom.expanding_std, lambda v: np.std(v, ddof=1), 1),
- (mom.expanding_var, lambda v: np.var(v, ddof=1), 1),
- (mom.expanding_median, np.median, None),
- ]
# suppress warnings about empty slices, as we are deliberately testing with empty/0-length Series/DataFrames
with warnings.catch_warnings():
@@ -1095,12 +1097,14 @@ def test_expanding_consistency(self):
# or (b) expanding_apply of np.nanxyz()
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
- functions = base_functions
+ functions = self.base_functions
# GH 8269
if no_nans:
- functions = base_functions + no_nan_functions
- for (expanding_f, f, require_min_periods) in functions:
+ functions = self.base_functions + self.no_nan_functions
+ for (f, require_min_periods, name) in functions:
+ expanding_f = getattr(mom,'expanding_{0}'.format(name))
+
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
@@ -1113,7 +1117,9 @@ def test_expanding_consistency(self):
else:
expanding_f_result = expanding_f(x, min_periods=min_periods)
expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=min_periods)
- assert_equal(expanding_f_result, expanding_apply_f_result)
+
+ if not tm._incompat_bottleneck_version(name):
+ assert_equal(expanding_f_result, expanding_apply_f_result)
if (expanding_f in [mom.expanding_cov, mom.expanding_corr]) and isinstance(x, DataFrame):
# test pairwise=True
@@ -1127,45 +1133,6 @@ def test_expanding_consistency(self):
@slow
def test_rolling_consistency(self):
- base_functions = [
- (mom.rolling_count, lambda v: Series(v).count(), None),
- (mom.rolling_max, lambda v: Series(v).max(), None),
- (mom.rolling_min, lambda v: Series(v).min(), None),
- (mom.rolling_sum, lambda v: Series(v).sum(), None),
- (mom.rolling_mean, lambda v: Series(v).mean(), None),
- (mom.rolling_std, lambda v: Series(v).std(), 1),
- (mom.rolling_cov, lambda v: Series(v).cov(Series(v)), None),
- (mom.rolling_corr, lambda v: Series(v).corr(Series(v)), None),
- (mom.rolling_var, lambda v: Series(v).var(), 1),
- #(mom.rolling_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed
- #(mom.rolling_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed
- #(lambda x, window, min_periods, center: mom.rolling_quantile(x, window, 0.3, min_periods=min_periods, center=center),
- # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed
- (mom.rolling_median, lambda v: Series(v).median(), None),
- (mom.rolling_max, np.nanmax, 1),
- (mom.rolling_min, np.nanmin, 1),
- (mom.rolling_sum, np.nansum, 1),
- ]
- if np.__version__ >= LooseVersion('1.8.0'):
- base_functions += [
- (mom.rolling_mean, np.nanmean, 1),
- (mom.rolling_std, lambda v: np.nanstd(v, ddof=1), 1),
- (mom.rolling_var, lambda v: np.nanvar(v, ddof=1), 1),
- ]
- if np.__version__ >= LooseVersion('1.9.0'):
- base_functions += [
- (mom.rolling_median, np.nanmedian, 1),
- ]
- no_nan_functions = [
- (mom.rolling_max, np.max, None),
- (mom.rolling_min, np.min, None),
- (mom.rolling_sum, np.sum, None),
- (mom.rolling_mean, np.mean, None),
- (mom.rolling_std, lambda v: np.std(v, ddof=1), 1),
- (mom.rolling_var, lambda v: np.var(v, ddof=1), 1),
- (mom.rolling_median, np.median, None),
- ]
-
for window in [1, 2, 3, 10, 20]:
for min_periods in set([0, 1, 2, 3, 4, window]):
if min_periods and (min_periods > window):
@@ -1195,11 +1162,14 @@ def test_rolling_consistency(self):
for (x, is_constant, no_nans) in self.data:
assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal
- functions = base_functions
+ functions = self.base_functions
+
# GH 8269
if no_nans:
- functions = base_functions + no_nan_functions
- for (rolling_f, f, require_min_periods) in functions:
+ functions = self.base_functions + self.no_nan_functions
+ for (f, require_min_periods, name) in functions:
+ rolling_f = getattr(mom,'rolling_{0}'.format(name))
+
if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods):
continue
@@ -1214,7 +1184,8 @@ def test_rolling_consistency(self):
rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center)
rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f,
min_periods=min_periods, center=center)
- assert_equal(rolling_f_result, rolling_apply_f_result)
+ if not tm._incompat_bottleneck_version(name):
+ assert_equal(rolling_f_result, rolling_apply_f_result)
if (rolling_f in [mom.rolling_cov, mom.rolling_corr]) and isinstance(x, DataFrame):
# test pairwise=True
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index aea165b907c05..e07d6cc3d9b90 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -12471,7 +12471,9 @@ def test_stat_operators_attempt_obj_array(self):
self.assertEqual(df.values.dtype, np.object_)
result = getattr(df, meth)(1)
expected = getattr(df.astype('f8'), meth)(1)
- assert_series_equal(result, expected)
+
+ if not tm._incompat_bottleneck_version(meth):
+ assert_series_equal(result, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean, check_dates=True)
@@ -12696,9 +12698,10 @@ def wrapper(x):
assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
- assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
- check_dtype=False,
- check_less_precise=check_less_precise)
+ if not tm._incompat_bottleneck_version(name):
+ assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
+ check_dtype=False,
+ check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
@@ -12727,8 +12730,9 @@ def wrapper(x):
all_na = self.frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
- self.assertTrue(np.isnan(r0).all())
- self.assertTrue(np.isnan(r1).all())
+ if not tm._incompat_bottleneck_version(name):
+ self.assertTrue(np.isnan(r0).all())
+ self.assertTrue(np.isnan(r1).all())
def test_mode(self):
df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index ec6ab4e0d2ab1..f7b6f947d8924 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -2014,7 +2014,10 @@ def test_cythonized_aggers(self):
df = DataFrame(data)
df.loc[2:10:2,'C'] = nan
- def _testit(op):
+ def _testit(name):
+
+ op = lambda x: getattr(x,name)()
+
# single column
grouped = df.drop(['B'], axis=1).groupby('A')
exp = {}
@@ -2035,18 +2038,19 @@ def _testit(op):
exp.name = 'C'
result = op(grouped)['C']
- assert_series_equal(result, exp)
-
- _testit(lambda x: x.count())
- _testit(lambda x: x.sum())
- _testit(lambda x: x.std())
- _testit(lambda x: x.var())
- _testit(lambda x: x.sem())
- _testit(lambda x: x.mean())
- _testit(lambda x: x.median())
- _testit(lambda x: x.prod())
- _testit(lambda x: x.min())
- _testit(lambda x: x.max())
+ if not tm._incompat_bottleneck_version(name):
+ assert_series_equal(result, exp)
+
+ _testit('count')
+ _testit('sum')
+ _testit('std')
+ _testit('var')
+ _testit('sem')
+ _testit('mean')
+ _testit('median')
+ _testit('prod')
+ _testit('min')
+ _testit('max')
def test_max_min_non_numeric(self):
# #2700
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index a903b76b3ac7f..fe56d5d1da6bd 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -9,12 +9,13 @@
import pandas.core.nanops as nanops
import pandas.util.testing as tm
-nanops._USE_BOTTLENECK = False
-
+use_bn = nanops._USE_BOTTLENECK
class TestnanopsDataFrame(tm.TestCase):
+
def setUp(self):
np.random.seed(11235)
+ nanops._USE_BOTTLENECK = False
self.arr_shape = (11, 7, 5)
@@ -116,6 +117,9 @@ def setUp(self):
self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0]
self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0]
+ def tearDown(self):
+ nanops._USE_BOTTLENECK = use_bn
+
def check_results(self, targ, res, axis):
res = getattr(res, 'asm8', res)
res = getattr(res, 'values', res)
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 9cdc769dd7d74..64edf29915206 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -168,7 +168,8 @@ def wrapper(x):
for i in range(obj.ndim):
result = f(axis=i)
- assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
+ if not tm._incompat_bottleneck_version(name):
+ assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 289f7f134aa27..3772d4b9c272b 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -144,7 +144,8 @@ def wrapper(x):
for i in range(obj.ndim):
result = f(axis=i)
- assert_panel_equal(result, obj.apply(skipna_wrapper, axis=i))
+ if not tm._incompat_bottleneck_version(name):
+ assert_panel_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index a195455c116fb..878bfdf3ac9fd 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -246,6 +246,38 @@ def _skip_if_python26():
import nose
raise nose.SkipTest("skipping on python2.6")
+def _incompat_bottleneck_version(method):
+ """ skip if we have bottleneck installed
+ and its >= 1.0
+ as we don't match the nansum/nanprod behavior for all-nan
+ ops, see GH9422
+ """
+ if method not in ['sum','prod']:
+ return False
+ try:
+ import bottleneck as bn
+ return bn.__version__ >= LooseVersion('1.0')
+ except ImportError:
+ return False
+
+def skip_if_no_ne(engine='numexpr'):
+ import nose
+ _USE_NUMEXPR = pd.computation.expressions._USE_NUMEXPR
+
+ if engine == 'numexpr':
+ try:
+ import numexpr as ne
+ except ImportError:
+ raise nose.SkipTest("numexpr not installed")
+
+ if not _USE_NUMEXPR:
+ raise nose.SkipTest("numexpr disabled")
+
+ if ne.__version__ < LooseVersion('2.0'):
+ raise nose.SkipTest("numexpr version too low: "
+ "%s" % ne.__version__)
+
+
#------------------------------------------------------------------------------
# locale utilities
@@ -1986,24 +2018,6 @@ def assert_produces_warning(expected_warning=Warning, filter_level="always",
% extra_warnings)
-def skip_if_no_ne(engine='numexpr'):
- import nose
- _USE_NUMEXPR = pd.computation.expressions._USE_NUMEXPR
-
- if engine == 'numexpr':
- try:
- import numexpr as ne
- except ImportError:
- raise nose.SkipTest("numexpr not installed")
-
- if not _USE_NUMEXPR:
- raise nose.SkipTest("numexpr disabled")
-
- if ne.__version__ < LooseVersion('2.0'):
- raise nose.SkipTest("numexpr version too low: "
- "%s" % ne.__version__)
-
-
def disabled(t):
t.disabled = True
return t
| xref #9422
| https://api.github.com/repos/pandas-dev/pandas/pulls/10986 | 2015-09-04T03:26:51Z | 2015-09-04T12:57:48Z | 2015-09-04T12:57:48Z | 2015-09-04T12:57:48Z |
TST/DOC #10846 Test and document use of SQLAlchemy expressions in read_sql() | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 5ad9af310225d..c05b2555dfeda 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -3918,6 +3918,42 @@ connecting to.
For more information see the examples the SQLAlchemy `documentation <http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html>`__
+Advanced SQLAlchemy queries
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can use SQLAlchemy constructs to describe your query.
+
+Use :func:`sqlalchemy.text` to specify query parameters in a backend-neutral way
+
+.. ipython:: python
+
+ import sqlalchemy as sa
+ pd.read_sql(sa.text('SELECT * FROM data where Col_1=:col1'), engine, params={'col1': 'X'})
+
+If you have an SQLAlchemy description of your database you can express where conditions using SQLAlchemy expressions
+
+.. ipython:: python
+
+ metadata = sa.MetaData()
+ data_table = sa.Table('data', metadata,
+ sa.Column('index', sa.Integer),
+ sa.Column('Date', sa.DateTime),
+ sa.Column('Col_1', sa.String),
+ sa.Column('Col_2', sa.Float),
+ sa.Column('Col_3', sa.Boolean),
+ )
+
+ pd.read_sql(sa.select([data_table]).where(data_table.c.Col_3 == True), engine)
+
+You can combine SQLAlchemy expressions with parameters passed to :func:`read_sql` using :func:`sqlalchemy.bindparam`
+
+.. ipython:: python
+
+ import datetime as dt
+ expr = sa.select([data_table]).where(data_table.c.Date > sa.bindparam('date'))
+ pd.read_sql(expr, engine, params={'date': dt.datetime(2010, 10, 18)})
+
+
Sqlite fallback
'''''''''''''''
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 2ed0126505c41..34f28e2fbfacb 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -364,9 +364,9 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
Parameters
----------
- sql : string
- SQL query to be executed
- con : SQLAlchemy connectable(engine/connection) or database string URI
+ sql : string SQL query or SQLAlchemy Selectable (select or text object)
+ to be executed.
+ con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
@@ -423,8 +423,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
Parameters
----------
- sql : string
- SQL query to be executed or database table name.
+ sql : string SQL query or SQLAlchemy Selectable (select or text object)
+ to be executed, or database table name.
con : SQLAlchemy connectable(engine/connection) or database string URI
or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index d61c5f0740a91..15e241dae895e 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -951,6 +951,35 @@ def test_to_sql_read_sql_with_database_uri(self):
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
+ def _make_iris_table_metadata(self):
+ sa = sqlalchemy
+ metadata = sa.MetaData()
+ iris = sa.Table('iris', metadata,
+ sa.Column('SepalLength', sa.REAL),
+ sa.Column('SepalWidth', sa.REAL),
+ sa.Column('PetalLength', sa.REAL),
+ sa.Column('PetalWidth', sa.REAL),
+ sa.Column('Name', sa.TEXT)
+ )
+
+ return iris
+
+ def test_query_by_text_obj(self):
+ # WIP : GH10846
+ name_text = sqlalchemy.text('select * from iris where name=:name')
+ iris_df = sql.read_sql(name_text, self.conn, params={'name': 'Iris-versicolor'})
+ all_names = set(iris_df['Name'])
+ self.assertEqual(all_names, set(['Iris-versicolor']))
+
+ def test_query_by_select_obj(self):
+ # WIP : GH10846
+ iris = self._make_iris_table_metadata()
+
+ name_select = sqlalchemy.select([iris]).where(iris.c.Name == sqlalchemy.bindparam('name'))
+ iris_df = sql.read_sql(name_select, self.conn, params={'name': 'Iris-setosa'})
+ all_names = set(iris_df['Name'])
+ self.assertEqual(all_names, set(['Iris-setosa']))
+
class _EngineToConnMixin(object):
"""
| This provides 2 tests using SQLALchemy expressions to read SQL data into a dataframe and accompanying documentation.
It attempts to address #10846 by explaining how this feature is already supported by pandas.
Closes #10846
| https://api.github.com/repos/pandas-dev/pandas/pulls/10983 | 2015-09-03T16:18:35Z | 2015-09-22T12:45:07Z | 2015-09-22T12:45:07Z | 2015-09-22T12:45:07Z |
DOC: Examples for Series.apply docstring | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2890730956c75..116ae9f31b5a4 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2064,13 +2064,84 @@ def apply(self, func, convert_dtype=True, args=(), **kwds):
Positional arguments to pass to function in addition to the value
Additional keyword arguments will be passed as keywords to the function
+ Returns
+ -------
+ y : Series or DataFrame if func returns a Series
+
See also
--------
Series.map: For element-wise operations
- Returns
- -------
- y : Series or DataFrame if func returns a Series
+ Examples
+ --------
+
+ Create a series with typical summer temperatures for each city.
+
+ >>> import pandas as pd
+ >>> import numpy as np
+ >>> series = pd.Series([20, 21, 12], index=['London',
+ ... 'New York','Helsinki'])
+ London 20
+ New York 21
+ Helsinki 12
+ dtype: int64
+
+ Square the values by defining a function and passing it as an
+ argument to ``apply()``.
+
+ >>> def square(x):
+ ... return x**2
+ >>> series.apply(square)
+ London 400
+ New York 441
+ Helsinki 144
+ dtype: int64
+
+ Square the values by passing an anonymous function as an
+ argument to ``apply()``.
+
+ >>> series.apply(lambda x: x**2)
+ London 400
+ New York 441
+ Helsinki 144
+ dtype: int64
+
+ Define a custom function that needs additional positional
+ arguments and pass these additional arguments using the
+ ``args`` keyword.
+
+ >>> def subtract_custom_value(x, custom_value):
+ ... return x-custom_value
+
+ >>> series.apply(subtract_custom_value, args=(5,))
+ London 15
+ New York 16
+ Helsinki 7
+ dtype: int64
+
+ Define a custom function that takes keyword arguments
+ and pass these arguments to ``apply``.
+
+ >>> def add_custom_values(x, **kwargs):
+ ... for month in kwargs:
+ ... x+=kwargs[month]
+ ... return x
+
+ >>> series.apply(add_custom_values, june=30, july=20, august=25)
+ London 95
+ New York 96
+ Helsinki 87
+ dtype: int64
+
+ Use a function from the Numpy library.
+
+ >>> series.apply(np.log)
+ London 2.995732
+ New York 3.044522
+ Helsinki 2.484907
+ dtype: float64
+
+
"""
if len(self) == 0:
return self._constructor(dtype=self.dtype,
| I thought it might be nice to have some examples showing how to pass custom functions and functions with additional keywords.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10977 | 2015-09-03T00:07:23Z | 2015-09-04T22:18:42Z | 2015-09-04T22:18:42Z | 2015-09-04T22:18:51Z |
CLN: removes BinGrouper kind of cython methods | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 534117b8e9249..caa5d83da6b87 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1793,17 +1793,25 @@ def indices(self):
def group_info(self):
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups)
- comp_ids = np.repeat(np.arange(ngroups), np.diff(np.r_[0, self.bins]))
+ rep = np.diff(np.r_[0, self.bins])
+
+ if ngroups == len(self.bins):
+ comp_ids = np.repeat(np.arange(ngroups), rep)
+ else:
+ comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
+
return comp_ids, obs_group_ids, ngroups
@cache_readonly
def ngroups(self):
- return len(self.binlabels)
+ return len(self.result_index)
@cache_readonly
def result_index(self):
- mask = self.binlabels.asi8 == tslib.iNaT
- return self.binlabels[~mask]
+ if len(self.binlabels) != 0 and isnull(self.binlabels[0]):
+ return self.binlabels[1:]
+
+ return self.binlabels
@property
def levels(self):
@@ -1839,40 +1847,14 @@ def size(self):
#----------------------------------------------------------------------
# cython aggregation
- _cython_functions = {
- 'add': 'group_add_bin',
- 'prod': 'group_prod_bin',
- 'mean': 'group_mean_bin',
- 'min': 'group_min_bin',
- 'max': 'group_max_bin',
- 'var': 'group_var_bin',
- 'ohlc': 'group_ohlc',
- 'first': {
- 'name': 'group_nth_bin',
- 'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
- },
- 'last': 'group_last_bin',
- 'count': 'group_count_bin',
- }
+ _cython_functions = {'ohlc': 'group_ohlc'}
+ _cython_functions.update(BaseGrouper._cython_functions)
+ _cython_functions.pop('median')
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
- def _aggregate(self, result, counts, values, agg_func, is_numeric=True):
-
- if values.ndim > 3:
- # punting for now
- raise NotImplementedError("number of dimensions is currently "
- "limited to 3")
- elif values.ndim > 2:
- for i, chunk in enumerate(values.transpose(2, 0, 1)):
- agg_func(result[:, :, i], counts, chunk, self.bins)
- else:
- agg_func(result, counts, values, self.bins)
-
- return result
-
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py
index 29a991a9acfd3..c086919d94644 100644
--- a/pandas/src/generate_code.py
+++ b/pandas/src/generate_code.py
@@ -751,105 +751,6 @@ def group_last_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
out[i, j] = resx[i, j]
"""
-group_last_bin_template = """@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_last_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[%(c_type)s, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- %(dest_type2)s val, count
- ndarray[%(dest_type2)s, ndim=2] resx, nobs
-
- nobs = np.zeros_like(out)
- resx = np.empty_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = %(nan_val)s
- else:
- out[i, j] = resx[i, j]
-"""
-
-group_nth_bin_template = """@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_nth_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[%(c_type)s, ndim=2] values,
- ndarray[int64_t] bins, int64_t rank):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- %(dest_type2)s val, count
- ndarray[%(dest_type2)s, ndim=2] resx, nobs
-
- nobs = np.zeros_like(out)
- resx = np.empty_like(out)
-
- if len(bin) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- if nobs[b, j] == rank:
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = %(nan_val)s
- else:
- out[i, j] = resx[i, j]
-"""
-
group_nth_template = """@cython.wraparound(False)
@cython.boundscheck(False)
def group_nth_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
@@ -961,69 +862,6 @@ def group_add_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
out[i, j] = sumx[i, j]
"""
-group_add_bin_template = """@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_add_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[%(dest_type2)s, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b, nbins
- %(dest_type2)s val, count
- ndarray[%(dest_type2)s, ndim=2] sumx, nobs
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
- N, K = (<object> values).shape
-
- with nogil:
-
- b = 0
- if K > 1:
-
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- sumx[b, j] += val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- sumx[b, 0] += val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = sumx[i, j]
-"""
-
group_prod_template = """@cython.wraparound(False)
@cython.boundscheck(False)
def group_prod_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
@@ -1083,68 +921,6 @@ def group_prod_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
out[i, j] = prodx[i, j]
"""
-group_prod_bin_template = """@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_prod_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[%(dest_type2)s, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- %(dest_type2)s val, count
- ndarray[%(dest_type2)s, ndim=2] prodx, nobs
-
- nobs = np.zeros_like(out)
- prodx = np.ones_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
- N, K = (<object> values).shape
-
- with nogil:
-
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- prodx[b, j] *= val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- prodx[b, 0] *= val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = prodx[i, j]
-"""
-
group_var_template = """@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@@ -1195,72 +971,6 @@ def group_var_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
"""
-group_var_bin_template = """@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_var_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[%(dest_type2)s, ndim=2] values,
- ndarray[int64_t] bins):
-
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- %(dest_type2)s val, ct
- ndarray[%(dest_type2)s, ndim=2] nobs, sumx, sumxx
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
- sumxx = np.zeros_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- sumx[b, j] += val
- sumxx[b, j] += val * val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- sumx[b, 0] += val
- sumxx[b, 0] += val * val
-
- for i in range(ngroups):
- for j in range(K):
- ct = nobs[i, j]
- if ct < 2:
- out[i, j] = NAN
- else:
- out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
- (ct * ct - ct))
-"""
-
group_count_template = """@cython.boundscheck(False)
@cython.wraparound(False)
def group_count_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
@@ -1299,115 +1009,12 @@ def group_count_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
%(tab)s out[i, j] = nobs[i, j]
"""
-group_count_bin_template = """@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_count_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[%(c_type)s, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, ngroups
- Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0
- %(c_type)s val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(bins) == 0:
- return
- ngroups = len(bins) + (bins[len(bins) - 1] != N)
-
- %(nogil)s
- %(tab)sfor i in range(N):
- %(tab)s while b < ngroups - 1 and i >= bins[b]:
- %(tab)s b += 1
-
- %(tab)s counts[b] += 1
- %(tab)s for j in range(K):
- %(tab)s val = values[i, j]
-
- %(tab)s # not nan
- %(tab)s nobs[b, j] += val == val and val != iNaT
-
- %(tab)sfor i in range(ngroups):
- %(tab)s for j in range(K):
- %(tab)s out[i, j] = nobs[i, j]
-"""
-
# add passing bin edges, instead of labels
#----------------------------------------------------------------------
# group_min, group_max
-group_min_bin_template = """@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_min_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[%(dest_type2)s, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- %(dest_type2)s val, count
- ndarray[%(dest_type2)s, ndim=2] minx, nobs
-
- nobs = np.zeros_like(out)
-
- minx = np.empty_like(out)
- minx.fill(%(inf_val)s)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- if val < minx[b, j]:
- minx[b, j] = val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- if val < minx[b, 0]:
- minx[b, 0] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = %(nan_val)s
- else:
- out[i, j] = minx[i, j]
-"""
-
group_max_template = """@cython.wraparound(False)
@cython.boundscheck(False)
def group_max_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
@@ -1471,72 +1078,6 @@ def group_max_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
out[i, j] = maxx[i, j]
"""
-group_max_bin_template = """@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_max_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[%(dest_type2)s, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- %(dest_type2)s val, count
- ndarray[%(dest_type2)s, ndim=2] maxx, nobs
-
- nobs = np.zeros_like(out)
- maxx = np.empty_like(out)
- maxx.fill(-%(inf_val)s)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- if val > maxx[b, j]:
- maxx[b, j] = val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- if val > maxx[b, 0]:
- maxx[b, 0] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = %(nan_val)s
- else:
- out[i, j] = maxx[i, j]
-"""
-
-
group_min_template = """@cython.wraparound(False)
@cython.boundscheck(False)
def group_min_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
@@ -1656,141 +1197,50 @@ def group_mean_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
out[i, j] = sumx[i, j] / count
"""
-group_mean_bin_template = """
-@cython.boundscheck(False)
-def group_mean_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[%(dest_type2)s, ndim=2] values,
- ndarray[int64_t] bins):
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- %(dest_type2)s val, count
- ndarray[%(dest_type2)s, ndim=2] sumx, nobs
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
-
- N, K = (<object> values).shape
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- with nogil:
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- sumx[b, j] += val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- sumx[b, 0] += val
-
- for i in range(ngroups):
- for j in range(K):
- count = nobs[i, j]
- if count == 0:
- out[i, j] = NAN
- else:
- out[i, j] = sumx[i, j] / count
-"""
-
group_ohlc_template = """@cython.wraparound(False)
@cython.boundscheck(False)
def group_ohlc_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
ndarray[int64_t] counts,
ndarray[%(dest_type2)s, ndim=2] values,
- ndarray[int64_t] bins):
+ ndarray[int64_t] labels):
'''
Only aggregates on axis=0
'''
cdef:
- Py_ssize_t i, j, N, K, ngroups, b
+ Py_ssize_t i, j, N, K, lab
%(dest_type2)s val, count
- %(dest_type2)s vopen, vhigh, vlow, vclose
- bint got_first = 0
+ Py_ssize_t ngroups = len(counts)
- if len(bins) == 0:
+ if len(labels) == 0:
return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
N, K = (<object> values).shape
if out.shape[1] != 4:
raise ValueError('Output array must have 4 columns')
- b = 0
if K > 1:
raise NotImplementedError("Argument 'values' must have only "
"one dimension")
- else:
+ out.fill(np.nan)
- with nogil:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- if not got_first:
- out[b, 0] = NAN
- out[b, 1] = NAN
- out[b, 2] = NAN
- out[b, 3] = NAN
- else:
- out[b, 0] = vopen
- out[b, 1] = vhigh
- out[b, 2] = vlow
- out[b, 3] = vclose
- b += 1
- got_first = 0
-
- counts[b] += 1
- val = values[i, 0]
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab == -1:
+ continue
- # not nan
- if val == val:
- if not got_first:
- got_first = 1
- vopen = val
- vlow = val
- vhigh = val
- else:
- if val < vlow:
- vlow = val
- if val > vhigh:
- vhigh = val
- vclose = val
-
- if not got_first:
- out[b, 0] = NAN
- out[b, 1] = NAN
- out[b, 2] = NAN
- out[b, 3] = NAN
+ counts[lab] += 1
+ val = values[i, 0]
+ if val != val:
+ continue
+
+ if out[lab, 0] != out[lab, 0]:
+ out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val
else:
- out[b, 0] = vopen
- out[b, 1] = vhigh
- out[b, 2] = vlow
- out[b, 3] = vclose
+ out[lab, 1] = max(out[lab, 1], val)
+ out[lab, 2] = min(out[lab, 2], val)
+ out[lab, 3] = val
"""
arrmap_template = """@cython.wraparound(False)
@@ -2534,26 +1984,18 @@ def generate_from_template(template, exclude=None):
put_2d = [diff_2d_template]
groupbys = [group_add_template,
- group_add_bin_template,
group_prod_template,
- group_prod_bin_template,
group_var_template,
- group_var_bin_template,
group_mean_template,
- group_mean_bin_template,
group_ohlc_template]
groupby_selection = [group_last_template,
- group_last_bin_template,
- group_nth_template,
- group_nth_bin_template]
+ group_nth_template]
groupby_min_max = [group_min_template,
- group_min_bin_template,
- group_max_template,
- group_max_bin_template]
+ group_max_template]
-groupby_count = [group_count_template, group_count_bin_template]
+groupby_count = [group_count_template]
templates_1d = [map_indices_template,
pad_template,
diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx
index d4cf7824c8911..c0ecd04749e58 100644
--- a/pandas/src/generated.pyx
+++ b/pandas/src/generated.pyx
@@ -6865,131 +6865,6 @@ def group_add_float32(ndarray[float32_t, ndim=2] out,
out[i, j] = sumx[i, j]
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_add_bin_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b, nbins
- float64_t val, count
- ndarray[float64_t, ndim=2] sumx, nobs
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
- N, K = (<object> values).shape
-
- with nogil:
-
- b = 0
- if K > 1:
-
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- sumx[b, j] += val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- sumx[b, 0] += val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = sumx[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_add_bin_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b, nbins
- float32_t val, count
- ndarray[float32_t, ndim=2] sumx, nobs
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
- N, K = (<object> values).shape
-
- with nogil:
-
- b = 0
- if K > 1:
-
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- sumx[b, j] += val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- sumx[b, 0] += val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = sumx[i, j]
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def group_prod_float64(ndarray[float64_t, ndim=2] out,
@@ -7107,129 +6982,6 @@ def group_prod_float32(ndarray[float32_t, ndim=2] out,
out[i, j] = prodx[i, j]
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_prod_bin_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float64_t val, count
- ndarray[float64_t, ndim=2] prodx, nobs
-
- nobs = np.zeros_like(out)
- prodx = np.ones_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
- N, K = (<object> values).shape
-
- with nogil:
-
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- prodx[b, j] *= val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- prodx[b, 0] *= val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = prodx[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_prod_bin_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float32_t val, count
- ndarray[float32_t, ndim=2] prodx, nobs
-
- nobs = np.zeros_like(out)
- prodx = np.ones_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
- N, K = (<object> values).shape
-
- with nogil:
-
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- prodx[b, j] *= val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- prodx[b, 0] *= val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = prodx[i, j]
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@@ -7329,137 +7081,6 @@ def group_var_float32(ndarray[float32_t, ndim=2] out,
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_var_bin_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] bins):
-
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float64_t val, ct
- ndarray[float64_t, ndim=2] nobs, sumx, sumxx
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
- sumxx = np.zeros_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- sumx[b, j] += val
- sumxx[b, j] += val * val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- sumx[b, 0] += val
- sumxx[b, 0] += val * val
-
- for i in range(ngroups):
- for j in range(K):
- ct = nobs[i, j]
- if ct < 2:
- out[i, j] = NAN
- else:
- out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
- (ct * ct - ct))
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_var_bin_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] bins):
-
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float32_t val, ct
- ndarray[float32_t, ndim=2] nobs, sumx, sumxx
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
- sumxx = np.zeros_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
-
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- sumx[b, j] += val
- sumxx[b, j] += val * val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- sumx[b, 0] += val
- sumxx[b, 0] += val * val
-
- for i in range(ngroups):
- for j in range(K):
- ct = nobs[i, j]
- if ct < 2:
- out[i, j] = NAN
- else:
- out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
- (ct * ct - ct))
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def group_mean_float64(ndarray[float64_t, ndim=2] out,
@@ -7569,276 +7190,95 @@ def group_mean_float32(ndarray[float32_t, ndim=2] out,
out[i, j] = sumx[i, j] / count
-
-@cython.boundscheck(False)
-def group_mean_bin_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] bins):
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float64_t val, count
- ndarray[float64_t, ndim=2] sumx, nobs
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
-
- N, K = (<object> values).shape
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- with nogil:
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- sumx[b, j] += val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- sumx[b, 0] += val
-
- for i in range(ngroups):
- for j in range(K):
- count = nobs[i, j]
- if count == 0:
- out[i, j] = NAN
- else:
- out[i, j] = sumx[i, j] / count
-
-
-@cython.boundscheck(False)
-def group_mean_bin_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] bins):
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float32_t val, count
- ndarray[float32_t, ndim=2] sumx, nobs
-
- nobs = np.zeros_like(out)
- sumx = np.zeros_like(out)
-
- N, K = (<object> values).shape
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- with nogil:
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- sumx[b, j] += val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- sumx[b, 0] += val
-
- for i in range(ngroups):
- for j in range(K):
- count = nobs[i, j]
- if count == 0:
- out[i, j] = NAN
- else:
- out[i, j] = sumx[i, j] / count
-
-
-@cython.wraparound(False)
+@cython.wraparound(False)
@cython.boundscheck(False)
def group_ohlc_float64(ndarray[float64_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] bins):
+ ndarray[int64_t] labels):
'''
Only aggregates on axis=0
'''
cdef:
- Py_ssize_t i, j, N, K, ngroups, b
+ Py_ssize_t i, j, N, K, lab
float64_t val, count
- float64_t vopen, vhigh, vlow, vclose
- bint got_first = 0
+ Py_ssize_t ngroups = len(counts)
- if len(bins) == 0:
+ if len(labels) == 0:
return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
N, K = (<object> values).shape
if out.shape[1] != 4:
raise ValueError('Output array must have 4 columns')
- b = 0
if K > 1:
raise NotImplementedError("Argument 'values' must have only "
"one dimension")
- else:
+ out.fill(np.nan)
- with nogil:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- if not got_first:
- out[b, 0] = NAN
- out[b, 1] = NAN
- out[b, 2] = NAN
- out[b, 3] = NAN
- else:
- out[b, 0] = vopen
- out[b, 1] = vhigh
- out[b, 2] = vlow
- out[b, 3] = vclose
- b += 1
- got_first = 0
-
- counts[b] += 1
- val = values[i, 0]
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab == -1:
+ continue
- # not nan
- if val == val:
- if not got_first:
- got_first = 1
- vopen = val
- vlow = val
- vhigh = val
- else:
- if val < vlow:
- vlow = val
- if val > vhigh:
- vhigh = val
- vclose = val
-
- if not got_first:
- out[b, 0] = NAN
- out[b, 1] = NAN
- out[b, 2] = NAN
- out[b, 3] = NAN
+ counts[lab] += 1
+ val = values[i, 0]
+ if val != val:
+ continue
+
+ if out[lab, 0] != out[lab, 0]:
+ out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val
else:
- out[b, 0] = vopen
- out[b, 1] = vhigh
- out[b, 2] = vlow
- out[b, 3] = vclose
+ out[lab, 1] = max(out[lab, 1], val)
+ out[lab, 2] = min(out[lab, 2], val)
+ out[lab, 3] = val
@cython.wraparound(False)
@cython.boundscheck(False)
def group_ohlc_float32(ndarray[float32_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] bins):
+ ndarray[int64_t] labels):
'''
Only aggregates on axis=0
'''
cdef:
- Py_ssize_t i, j, N, K, ngroups, b
+ Py_ssize_t i, j, N, K, lab
float32_t val, count
- float32_t vopen, vhigh, vlow, vclose
- bint got_first = 0
+ Py_ssize_t ngroups = len(counts)
- if len(bins) == 0:
+ if len(labels) == 0:
return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
N, K = (<object> values).shape
if out.shape[1] != 4:
raise ValueError('Output array must have 4 columns')
- b = 0
if K > 1:
raise NotImplementedError("Argument 'values' must have only "
"one dimension")
- else:
+ out.fill(np.nan)
- with nogil:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- if not got_first:
- out[b, 0] = NAN
- out[b, 1] = NAN
- out[b, 2] = NAN
- out[b, 3] = NAN
- else:
- out[b, 0] = vopen
- out[b, 1] = vhigh
- out[b, 2] = vlow
- out[b, 3] = vclose
- b += 1
- got_first = 0
-
- counts[b] += 1
- val = values[i, 0]
+ with nogil:
+ for i in range(N):
+ lab = labels[i]
+ if lab == -1:
+ continue
- # not nan
- if val == val:
- if not got_first:
- got_first = 1
- vopen = val
- vlow = val
- vhigh = val
- else:
- if val < vlow:
- vlow = val
- if val > vhigh:
- vhigh = val
- vclose = val
-
- if not got_first:
- out[b, 0] = NAN
- out[b, 1] = NAN
- out[b, 2] = NAN
- out[b, 3] = NAN
+ counts[lab] += 1
+ val = values[i, 0]
+ if val != val:
+ continue
+
+ if out[lab, 0] != out[lab, 0]:
+ out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val
else:
- out[b, 0] = vopen
- out[b, 1] = vhigh
- out[b, 2] = vlow
- out[b, 3] = vclose
+ out[lab, 1] = max(out[lab, 1], val)
+ out[lab, 2] = min(out[lab, 2], val)
+ out[lab, 3] = val
@cython.wraparound(False)
@@ -7977,151 +7417,6 @@ def group_last_int64(ndarray[int64_t, ndim=2] out,
out[i, j] = resx[i, j]
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_last_bin_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float64_t val, count
- ndarray[float64_t, ndim=2] resx, nobs
-
- nobs = np.zeros_like(out)
- resx = np.empty_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = resx[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_last_bin_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float32_t val, count
- ndarray[float32_t, ndim=2] resx, nobs
-
- nobs = np.zeros_like(out)
- resx = np.empty_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = resx[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_last_bin_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- int64_t val, count
- ndarray[int64_t, ndim=2] resx, nobs
-
- nobs = np.zeros_like(out)
- resx = np.empty_like(out)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = iNaT
- else:
- out[i, j] = resx[i, j]
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def group_nth_float64(ndarray[float64_t, ndim=2] out,
@@ -8263,538 +7558,7 @@ def group_nth_int64(ndarray[int64_t, ndim=2] out,
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_nth_bin_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] bins, int64_t rank):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float64_t val, count
- ndarray[float64_t, ndim=2] resx, nobs
-
- nobs = np.zeros_like(out)
- resx = np.empty_like(out)
-
- if len(bin) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- if nobs[b, j] == rank:
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = resx[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_nth_bin_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] bins, int64_t rank):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float32_t val, count
- ndarray[float32_t, ndim=2] resx, nobs
-
- nobs = np.zeros_like(out)
- resx = np.empty_like(out)
-
- if len(bin) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- if nobs[b, j] == rank:
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = resx[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_nth_bin_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] bins, int64_t rank):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- int64_t val, count
- ndarray[int64_t, ndim=2] resx, nobs
-
- nobs = np.zeros_like(out)
- resx = np.empty_like(out)
-
- if len(bin) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- if nobs[b, j] == rank:
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = iNaT
- else:
- out[i, j] = resx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_min_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] labels):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float64_t val, count
- ndarray[float64_t, ndim=2] minx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
-
- minx = np.empty_like(out)
- minx.fill(np.inf)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- if val < minx[lab, j]:
- minx[lab, j] = val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[lab, 0] += 1
- if val < minx[lab, 0]:
- minx[lab, 0] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = minx[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_min_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] labels):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- float32_t val, count
- ndarray[float32_t, ndim=2] minx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
-
- minx = np.empty_like(out)
- minx.fill(np.inf)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- if val < minx[lab, j]:
- minx[lab, j] = val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[lab, 0] += 1
- if val < minx[lab, 0]:
- minx[lab, 0] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = minx[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_min_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] labels):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- int64_t val, count
- ndarray[int64_t, ndim=2] minx, nobs
-
- if not len(values) == len(labels):
- raise AssertionError("len(index) != len(labels)")
-
- nobs = np.zeros_like(out)
-
- minx = np.empty_like(out)
- minx.fill(9223372036854775807)
-
- N, K = (<object> values).shape
-
- with nogil:
- if K > 1:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[lab, j] += 1
- if val < minx[lab, j]:
- minx[lab, j] = val
- else:
- for i in range(N):
- lab = labels[i]
- if lab < 0:
- continue
-
- counts[lab] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[lab, 0] += 1
- if val < minx[lab, 0]:
- minx[lab, 0] = val
-
- for i in range(ncounts):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = iNaT
- else:
- out[i, j] = minx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_min_bin_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float64_t val, count
- ndarray[float64_t, ndim=2] minx, nobs
-
- nobs = np.zeros_like(out)
-
- minx = np.empty_like(out)
- minx.fill(np.inf)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- if val < minx[b, j]:
- minx[b, j] = val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- if val < minx[b, 0]:
- minx[b, 0] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = minx[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_min_bin_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- float32_t val, count
- ndarray[float32_t, ndim=2] minx, nobs
-
- nobs = np.zeros_like(out)
-
- minx = np.empty_like(out)
- minx.fill(np.inf)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- if val < minx[b, j]:
- minx[b, j] = val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- if val < minx[b, 0]:
- minx[b, 0] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = NAN
- else:
- out[i, j] = minx[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_min_bin_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- int64_t val, count
- ndarray[int64_t, ndim=2] minx, nobs
-
- nobs = np.zeros_like(out)
-
- minx = np.empty_like(out)
- minx.fill(9223372036854775807)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- with nogil:
- b = 0
- if K > 1:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- if val < minx[b, j]:
- minx[b, j] = val
- else:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- val = values[i, 0]
-
- # not nan
- if val == val:
- nobs[b, 0] += 1
- if val < minx[b, 0]:
- minx[b, 0] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = iNaT
- else:
- out[i, j] = minx[i, j]
-
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_max_float64(ndarray[float64_t, ndim=2] out,
+def group_min_float64(ndarray[float64_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float64_t, ndim=2] values,
ndarray[int64_t] labels):
@@ -8804,15 +7568,15 @@ def group_max_float64(ndarray[float64_t, ndim=2] out,
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
float64_t val, count
- ndarray[float64_t, ndim=2] maxx, nobs
+ ndarray[float64_t, ndim=2] minx, nobs
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
- maxx = np.empty_like(out)
- maxx.fill(-np.inf)
+ minx = np.empty_like(out)
+ minx.fill(np.inf)
N, K = (<object> values).shape
@@ -8830,8 +7594,8 @@ def group_max_float64(ndarray[float64_t, ndim=2] out,
# not nan
if val == val:
nobs[lab, j] += 1
- if val > maxx[lab, j]:
- maxx[lab, j] = val
+ if val < minx[lab, j]:
+ minx[lab, j] = val
else:
for i in range(N):
lab = labels[i]
@@ -8844,19 +7608,19 @@ def group_max_float64(ndarray[float64_t, ndim=2] out,
# not nan
if val == val:
nobs[lab, 0] += 1
- if val > maxx[lab, 0]:
- maxx[lab, 0] = val
+ if val < minx[lab, 0]:
+ minx[lab, 0] = val
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
out[i, j] = NAN
else:
- out[i, j] = maxx[i, j]
+ out[i, j] = minx[i, j]
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_max_float32(ndarray[float32_t, ndim=2] out,
+def group_min_float32(ndarray[float32_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float32_t, ndim=2] values,
ndarray[int64_t] labels):
@@ -8866,15 +7630,15 @@ def group_max_float32(ndarray[float32_t, ndim=2] out,
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
float32_t val, count
- ndarray[float32_t, ndim=2] maxx, nobs
+ ndarray[float32_t, ndim=2] minx, nobs
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
- maxx = np.empty_like(out)
- maxx.fill(-np.inf)
+ minx = np.empty_like(out)
+ minx.fill(np.inf)
N, K = (<object> values).shape
@@ -8892,8 +7656,8 @@ def group_max_float32(ndarray[float32_t, ndim=2] out,
# not nan
if val == val:
nobs[lab, j] += 1
- if val > maxx[lab, j]:
- maxx[lab, j] = val
+ if val < minx[lab, j]:
+ minx[lab, j] = val
else:
for i in range(N):
lab = labels[i]
@@ -8906,19 +7670,19 @@ def group_max_float32(ndarray[float32_t, ndim=2] out,
# not nan
if val == val:
nobs[lab, 0] += 1
- if val > maxx[lab, 0]:
- maxx[lab, 0] = val
+ if val < minx[lab, 0]:
+ minx[lab, 0] = val
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
out[i, j] = NAN
else:
- out[i, j] = maxx[i, j]
+ out[i, j] = minx[i, j]
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_max_int64(ndarray[int64_t, ndim=2] out,
+def group_min_int64(ndarray[int64_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[int64_t, ndim=2] values,
ndarray[int64_t] labels):
@@ -8928,15 +7692,15 @@ def group_max_int64(ndarray[int64_t, ndim=2] out,
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
int64_t val, count
- ndarray[int64_t, ndim=2] maxx, nobs
+ ndarray[int64_t, ndim=2] minx, nobs
if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")
nobs = np.zeros_like(out)
- maxx = np.empty_like(out)
- maxx.fill(-9223372036854775807)
+ minx = np.empty_like(out)
+ minx.fill(9223372036854775807)
N, K = (<object> values).shape
@@ -8954,8 +7718,8 @@ def group_max_int64(ndarray[int64_t, ndim=2] out,
# not nan
if val == val:
nobs[lab, j] += 1
- if val > maxx[lab, j]:
- maxx[lab, j] = val
+ if val < minx[lab, j]:
+ minx[lab, j] = val
else:
for i in range(N):
lab = labels[i]
@@ -8968,75 +7732,73 @@ def group_max_int64(ndarray[int64_t, ndim=2] out,
# not nan
if val == val:
nobs[lab, 0] += 1
- if val > maxx[lab, 0]:
- maxx[lab, 0] = val
+ if val < minx[lab, 0]:
+ minx[lab, 0] = val
for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
out[i, j] = iNaT
else:
- out[i, j] = maxx[i, j]
+ out[i, j] = minx[i, j]
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_max_bin_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] bins):
+def group_max_float64(ndarray[float64_t, ndim=2] out,
+ ndarray[int64_t] counts,
+ ndarray[float64_t, ndim=2] values,
+ ndarray[int64_t] labels):
'''
Only aggregates on axis=0
'''
cdef:
- Py_ssize_t i, j, N, K, ngroups, b
+ Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
float64_t val, count
ndarray[float64_t, ndim=2] maxx, nobs
+ if not len(values) == len(labels):
+ raise AssertionError("len(index) != len(labels)")
+
nobs = np.zeros_like(out)
+
maxx = np.empty_like(out)
maxx.fill(-np.inf)
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
N, K = (<object> values).shape
with nogil:
- b = 0
if K > 1:
for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
+ lab = labels[i]
+ if lab < 0:
+ continue
- counts[b] += 1
+ counts[lab] += 1
for j in range(K):
val = values[i, j]
# not nan
if val == val:
- nobs[b, j] += 1
- if val > maxx[b, j]:
- maxx[b, j] = val
+ nobs[lab, j] += 1
+ if val > maxx[lab, j]:
+ maxx[lab, j] = val
else:
for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
+ lab = labels[i]
+ if lab < 0:
+ continue
- counts[b] += 1
+ counts[lab] += 1
val = values[i, 0]
# not nan
if val == val:
- nobs[b, 0] += 1
- if val > maxx[b, 0]:
- maxx[b, 0] = val
+ nobs[lab, 0] += 1
+ if val > maxx[lab, 0]:
+ maxx[lab, 0] = val
- for i in range(ngroups):
+ for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
out[i, j] = NAN
@@ -9045,62 +7807,60 @@ def group_max_bin_float64(ndarray[float64_t, ndim=2] out,
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_max_bin_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] bins):
+def group_max_float32(ndarray[float32_t, ndim=2] out,
+ ndarray[int64_t] counts,
+ ndarray[float32_t, ndim=2] values,
+ ndarray[int64_t] labels):
'''
Only aggregates on axis=0
'''
cdef:
- Py_ssize_t i, j, N, K, ngroups, b
+ Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
float32_t val, count
ndarray[float32_t, ndim=2] maxx, nobs
+ if not len(values) == len(labels):
+ raise AssertionError("len(index) != len(labels)")
+
nobs = np.zeros_like(out)
+
maxx = np.empty_like(out)
maxx.fill(-np.inf)
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
N, K = (<object> values).shape
with nogil:
- b = 0
if K > 1:
for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
+ lab = labels[i]
+ if lab < 0:
+ continue
- counts[b] += 1
+ counts[lab] += 1
for j in range(K):
val = values[i, j]
# not nan
if val == val:
- nobs[b, j] += 1
- if val > maxx[b, j]:
- maxx[b, j] = val
+ nobs[lab, j] += 1
+ if val > maxx[lab, j]:
+ maxx[lab, j] = val
else:
for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
+ lab = labels[i]
+ if lab < 0:
+ continue
- counts[b] += 1
+ counts[lab] += 1
val = values[i, 0]
# not nan
if val == val:
- nobs[b, 0] += 1
- if val > maxx[b, 0]:
- maxx[b, 0] = val
+ nobs[lab, 0] += 1
+ if val > maxx[lab, 0]:
+ maxx[lab, 0] = val
- for i in range(ngroups):
+ for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
out[i, j] = NAN
@@ -9109,62 +7869,60 @@ def group_max_bin_float32(ndarray[float32_t, ndim=2] out,
@cython.wraparound(False)
@cython.boundscheck(False)
-def group_max_bin_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] bins):
+def group_max_int64(ndarray[int64_t, ndim=2] out,
+ ndarray[int64_t] counts,
+ ndarray[int64_t, ndim=2] values,
+ ndarray[int64_t] labels):
'''
Only aggregates on axis=0
'''
cdef:
- Py_ssize_t i, j, N, K, ngroups, b
+ Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
int64_t val, count
ndarray[int64_t, ndim=2] maxx, nobs
+ if not len(values) == len(labels):
+ raise AssertionError("len(index) != len(labels)")
+
nobs = np.zeros_like(out)
+
maxx = np.empty_like(out)
maxx.fill(-9223372036854775807)
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
N, K = (<object> values).shape
with nogil:
- b = 0
if K > 1:
for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
+ lab = labels[i]
+ if lab < 0:
+ continue
- counts[b] += 1
+ counts[lab] += 1
for j in range(K):
val = values[i, j]
# not nan
if val == val:
- nobs[b, j] += 1
- if val > maxx[b, j]:
- maxx[b, j] = val
+ nobs[lab, j] += 1
+ if val > maxx[lab, j]:
+ maxx[lab, j] = val
else:
for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
+ lab = labels[i]
+ if lab < 0:
+ continue
- counts[b] += 1
+ counts[lab] += 1
val = values[i, 0]
# not nan
if val == val:
- nobs[b, 0] += 1
- if val > maxx[b, 0]:
- maxx[b, 0] = val
+ nobs[lab, 0] += 1
+ if val > maxx[lab, 0]:
+ maxx[lab, 0] = val
- for i in range(ngroups):
+ for i in range(ncounts):
for j in range(K):
if nobs[i, j] == 0:
out[i, j] = iNaT
@@ -9358,187 +8116,6 @@ def group_count_int64(ndarray[int64_t, ndim=2] out,
out[i, j] = nobs[i, j]
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_count_bin_float64(ndarray[float64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float64_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, ngroups
- Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0
- float64_t val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(bins) == 0:
- return
- ngroups = len(bins) + (bins[len(bins) - 1] != N)
-
- with nogil:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- nobs[b, j] += val == val and val != iNaT
-
- for i in range(ngroups):
- for j in range(K):
- out[i, j] = nobs[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_count_bin_float32(ndarray[float32_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[float32_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, ngroups
- Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0
- float32_t val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(bins) == 0:
- return
- ngroups = len(bins) + (bins[len(bins) - 1] != N)
-
- with nogil:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- nobs[b, j] += val == val and val != iNaT
-
- for i in range(ngroups):
- for j in range(K):
- out[i, j] = nobs[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_count_bin_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, ngroups
- Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0
- int64_t val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(bins) == 0:
- return
- ngroups = len(bins) + (bins[len(bins) - 1] != N)
-
- with nogil:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- nobs[b, j] += val == val and val != iNaT
-
- for i in range(ngroups):
- for j in range(K):
- out[i, j] = nobs[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_count_bin_object(ndarray[object, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[object, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, ngroups
- Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0
- object val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(bins) == 0:
- return
- ngroups = len(bins) + (bins[len(bins) - 1] != N)
-
-
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- nobs[b, j] += val == val and val != iNaT
-
- for i in range(ngroups):
- for j in range(K):
- out[i, j] = nobs[i, j]
-
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def group_count_bin_int64(ndarray[int64_t, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[int64_t, ndim=2] values,
- ndarray[int64_t] bins):
- '''
- Only aggregates on axis=0
- '''
- cdef:
- Py_ssize_t i, j, ngroups
- Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0
- int64_t val
- ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]),
- dtype=np.int64)
-
- if len(bins) == 0:
- return
- ngroups = len(bins) + (bins[len(bins) - 1] != N)
-
- with nogil:
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- nobs[b, j] += val == val and val != iNaT
-
- for i in range(ngroups):
- for j in range(K):
- out[i, j] = nobs[i, j]
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def left_join_indexer_unique_float64(ndarray[float64_t] left,
diff --git a/pandas/tests/test_tseries.py b/pandas/tests/test_tseries.py
index 6dd43539eeabf..566fd54f3b024 100644
--- a/pandas/tests/test_tseries.py
+++ b/pandas/tests/test_tseries.py
@@ -474,78 +474,19 @@ def test_generate_bins(self):
self.assertRaises(ValueError, generate_bins_generic,
values, [-3, -1], 'right')
- def test_group_bin_functions(self):
-
- dtypes = ['float32','float64']
- funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
-
- np_funcs = {
- 'add': np.sum,
- 'mean': np.mean,
- 'prod': np.prod,
- 'min': np.min,
- 'max': np.max,
- 'var': lambda x: x.var(ddof=1) if len(x) >= 2 else np.nan
- }
-
- for fname in funcs:
- for d in dtypes:
- check_less_precise = False
- if d == 'float32':
- check_less_precise = True
- args = [getattr(algos, 'group_%s_%s' % (fname,d)),
- getattr(algos, 'group_%s_bin_%s' % (fname,d)),
- np_funcs[fname],
- d,
- check_less_precise]
- self._check_versions(*args)
-
- def _check_versions(self, irr_func, bin_func, np_func, dtype, check_less_precise):
- obj = self.obj.astype(dtype)
-
- cts = np.zeros(3, dtype=np.int64)
- exp = np.zeros((3, 1), dtype)
- irr_func(exp, cts, obj, self.labels)
-
- # bin-based version
- bins = np.array([3, 6], dtype=np.int64)
- out = np.zeros((3, 1), dtype)
- counts = np.zeros(len(out), dtype=np.int64)
- bin_func(out, counts, obj, bins)
-
- assert_almost_equal(out, exp, check_less_precise=check_less_precise)
-
- bins = np.array([3, 9, 10], dtype=np.int64)
- out = np.zeros((3, 1), dtype)
- counts = np.zeros(len(out), dtype=np.int64)
- bin_func(out, counts, obj, bins)
- exp = np.array([np_func(obj[:3]), np_func(obj[3:9]),
- np_func(obj[9:])],
- dtype=dtype)
- assert_almost_equal(out.squeeze(), exp, check_less_precise=check_less_precise)
-
- # duplicate bins
- bins = np.array([3, 6, 10, 10], dtype=np.int64)
- out = np.zeros((4, 1), dtype)
- counts = np.zeros(len(out), dtype=np.int64)
- bin_func(out, counts, obj, bins)
- exp = np.array([np_func(obj[:3]), np_func(obj[3:6]),
- np_func(obj[6:10]), np.nan],
- dtype=dtype)
- assert_almost_equal(out.squeeze(), exp, check_less_precise=check_less_precise)
-
def test_group_ohlc():
def _check(dtype):
obj = np.array(np.random.randn(20),dtype=dtype)
- bins = np.array([6, 12], dtype=np.int64)
+ bins = np.array([6, 12, 20], dtype=np.int64)
out = np.zeros((3, 4), dtype)
counts = np.zeros(len(out), dtype=np.int64)
+ labels = np.repeat(np.arange(3), np.diff(np.r_[0, bins]))
func = getattr(algos,'group_ohlc_%s' % dtype)
- func(out, counts, obj[:, None], bins)
+ func(out, counts, obj[:, None], labels)
def _ohlc(group):
if isnull(group).all():
@@ -559,7 +500,7 @@ def _ohlc(group):
assert_almost_equal(counts, [6, 6, 8])
obj[:6] = nan
- func(out, counts, obj[:, None], bins)
+ func(out, counts, obj[:, None], labels)
expected[0] = nan
assert_almost_equal(out, expected)
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 0bee6f514cad0..49d344631e4b9 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -1596,7 +1596,7 @@ def test_aggregate_with_nat(self):
normal_grouped = normal_df.groupby('key')
dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
- for func in ['min', 'max', 'prod']:
+ for func in ['min', 'max', 'sum', 'prod']:
normal_result = getattr(normal_grouped, func)()
dt_result = getattr(dt_grouped, func)()
pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]],
@@ -1606,7 +1606,7 @@ def test_aggregate_with_nat(self):
expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
assert_frame_equal(expected, dt_result)
- for func in ['count', 'sum']:
+ for func in ['count']:
normal_result = getattr(normal_grouped, func)()
pad = DataFrame([[0, 0, 0, 0]], index=[3], columns=['A', 'B', 'C', 'D'])
expected = normal_result.append(pad)
| https://api.github.com/repos/pandas-dev/pandas/pulls/10976 | 2015-09-02T23:55:08Z | 2015-09-03T13:02:09Z | 2015-09-03T13:02:09Z | 2015-09-04T18:40:03Z | |
BUG: repr of Periods in a Series is broken | diff --git a/pandas/core/format.py b/pandas/core/format.py
index d463c02dd41a2..818391d6eec23 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -2106,7 +2106,7 @@ def _format_strings(self):
class PeriodArrayFormatter(IntArrayFormatter):
def _format_strings(self):
- values = np.array(self.values.to_native_types(), dtype=object)
+ values = PeriodIndex(self.values).to_native_types()
formatter = self.formatter or (lambda x: '%s' % x)
fmt_values = [formatter(x) for x in values]
return fmt_values
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index 5741e9cf9c093..37fa72db77cb9 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -139,6 +139,51 @@ def test_representation(self):
result = getattr(idx, func)()
self.assertEqual(result, expected)
+ def test_representation_to_series(self):
+ idx1 = DatetimeIndex([], freq='D')
+ idx2 = DatetimeIndex(['2011-01-01'], freq='D')
+ idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
+ idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
+ idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
+ freq='H', tz='Asia/Tokyo')
+ idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
+ tz='US/Eastern')
+ idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
+
+ exp1 = """Series([], dtype: datetime64[ns])"""
+
+ exp2 = """0 2011-01-01
+dtype: datetime64[ns]"""
+
+ exp3 = """0 2011-01-01
+1 2011-01-02
+dtype: datetime64[ns]"""
+
+ exp4 = """0 2011-01-01
+1 2011-01-02
+2 2011-01-03
+dtype: datetime64[ns]"""
+
+ exp5 = """0 2011-01-01 09:00:00+09:00
+1 2011-01-01 10:00:00+09:00
+2 2011-01-01 11:00:00+09:00
+dtype: object"""
+
+ exp6 = """0 2011-01-01 09:00:00-05:00
+1 2011-01-01 10:00:00-05:00
+2 NaN
+dtype: object"""
+
+ exp7 = """0 2011-01-01 09:00:00
+1 2011-01-02 10:15:00
+dtype: datetime64[ns]"""
+
+ with pd.option_context('display.width', 300):
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7],
+ [exp1, exp2, exp3, exp4, exp5, exp6, exp7]):
+ result = repr(Series(idx))
+ self.assertEqual(result, expected)
+
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
@@ -536,6 +581,38 @@ def test_representation(self):
result = getattr(idx, func)()
self.assertEqual(result, expected)
+ def test_representation_to_series(self):
+ idx1 = TimedeltaIndex([], freq='D')
+ idx2 = TimedeltaIndex(['1 days'], freq='D')
+ idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
+ idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
+ idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
+
+ exp1 = """Series([], dtype: timedelta64[ns])"""
+
+ exp2 = """0 1 days
+dtype: timedelta64[ns]"""
+
+ exp3 = """0 1 days
+1 2 days
+dtype: timedelta64[ns]"""
+
+ exp4 = """0 1 days
+1 2 days
+2 3 days
+dtype: timedelta64[ns]"""
+
+ exp5 = """0 1 days 00:00:01
+1 2 days 00:00:00
+2 3 days 00:00:00
+dtype: timedelta64[ns]"""
+
+ with pd.option_context('display.width',300):
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
+ [exp1, exp2, exp3, exp4, exp5]):
+ result = repr(pd.Series(idx))
+ self.assertEqual(result, expected)
+
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
@@ -1145,6 +1222,60 @@ def test_representation(self):
result = getattr(idx, func)()
self.assertEqual(result, expected)
+ def test_representation_to_series(self):
+ # GH 10971
+ idx1 = PeriodIndex([], freq='D')
+ idx2 = PeriodIndex(['2011-01-01'], freq='D')
+ idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
+ idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
+ idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
+ idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
+
+ idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
+ idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
+ idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
+
+ exp1 = """Series([], dtype: object)"""
+
+ exp2 = """0 2011-01-01
+dtype: object"""
+
+ exp3 = """0 2011-01-01
+1 2011-01-02
+dtype: object"""
+
+ exp4 = """0 2011-01-01
+1 2011-01-02
+2 2011-01-03
+dtype: object"""
+
+ exp5 = """0 2011
+1 2012
+2 2013
+dtype: object"""
+
+ exp6 = """0 2011-01-01 09:00
+1 2012-02-01 10:00
+2 NaT
+dtype: object"""
+
+ exp7 = """0 2013Q1
+dtype: object"""
+
+ exp8 = """0 2013Q1
+1 2013Q2
+dtype: object"""
+
+ exp9 = """0 2013Q1
+1 2013Q2
+2 2013Q3
+dtype: object"""
+
+ for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
+ [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
+ result = repr(pd.Series(idx))
+ self.assertEqual(result, expected)
+
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
| Closes #10971. It is caused by #10718.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10974 | 2015-09-02T21:57:36Z | 2015-09-03T15:18:09Z | 2015-09-03T15:18:09Z | 2015-09-03T20:39:24Z |
DOC: Added default values in parsers.py doc-string | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 9ad992c434984..6801e8935e079 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -54,7 +54,7 @@ class ParserWarning(Warning):
Skip spaces after delimiter
escapechar : string (length 1), default None
One-character string used to escape delimiter when quoting is QUOTE_NONE.
-dtype : Type name or dict of column -> type
+dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
(Unsupported with engine='python')
compression : {'gzip', 'bz2', 'infer', None}, default 'infer'
@@ -65,7 +65,7 @@ class ParserWarning(Warning):
dialect : string or csv.Dialect instance, default None
If None defaults to Excel dialect. Ignored if sep longer than 1 char
See csv.Dialect documentation for more details
-header : int, list of ints
+header : int, list of ints, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Defaults to 0 if no ``names`` passed, otherwise ``None``. Explicitly
pass ``header=0`` to be able to replace existing names. The header can be
@@ -74,7 +74,7 @@ class ParserWarning(Warning):
skipped (e.g. 2 in this example are skipped). Note that this parameter
ignores commented lines and empty lines if ``skip_blank_lines=True``, so header=0
denotes the first line of data rather than the first line of the file.
-skiprows : list-like or integer
+skiprows : list-like or integer, default None
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file
index_col : int or sequence or False, default None
@@ -82,7 +82,7 @@ class ParserWarning(Warning):
MultiIndex is used. If you have a malformed file with delimiters at the end
of each line, you might consider index_col=False to force pandas to _not_
use the first column as the index (row names)
-names : array-like
+names : array-like, default None
List of column names to use. If file contains no header row, then you
should explicitly pass header=None
prefix : string, default None
@@ -90,14 +90,14 @@ class ParserWarning(Warning):
na_values : str, list-like or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values
-true_values : list
+true_values : list, default None
Values to consider as True
-false_values : list
+false_values : list, default None
Values to consider as False
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
-parse_dates : boolean, list of ints or names, list of lists, or dict
+parse_dates : boolean, list of ints or names, list of lists, or dict, default False
If True -> try parsing the index.
If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column.
If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column.
@@ -106,7 +106,7 @@ class ParserWarning(Warning):
keep_date_col : boolean, default False
If True and parse_dates specifies combining multiple columns then
keep the original columns.
-date_parser : function
+date_parser : function, default None
Function to use for converting a sequence of string columns to an
array of datetime instances. The default uses dateutil.parser.parser
to do the conversion. Pandas will try to call date_parser in three different
@@ -154,7 +154,7 @@ class ParserWarning(Warning):
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file
-usecols : array-like
+usecols : array-like, default None
Return a subset of the columns.
Results in much faster parsing time and lower memory usage.
mangle_dupe_cols : boolean, default True
| Function declaration contains default values for many of the parameters but the default values are not specified in the doc.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10968 | 2015-09-02T01:19:44Z | 2015-09-02T08:51:21Z | 2015-09-02T08:51:21Z | 2015-09-02T13:09:57Z |
ENH: read_excel MultiIndex #4679 | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 31d0be6151ba4..f3d14b78bbf54 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1989,6 +1989,46 @@ advanced strategies
Reading Excel Files
'''''''''''''''''''
+.. versionadded:: 0.17
+
+``read_excel`` can read a ``MultiIndex`` index, by passing a list of columns to ``index_col``
+and a ``MultiIndex`` column by passing a list of rows to ``header``. If either the ``index``
+or ``columns`` have serialized level names those will be read in as well by specifying
+the rows/columns that make up the levels.
+
+.. ipython:: python
+
+ # MultiIndex index - no names
+ df = pd.DataFrame({'a':[1,2,3,4], 'b':[5,6,7,8]},
+ index=pd.MultiIndex.from_product([['a','b'],['c','d']]))
+ df.to_excel('path_to_file.xlsx')
+ df = pd.read_excel('path_to_file.xlsx', index_col=[0,1])
+ df
+
+ # MultiIndex index - with names
+ df.index = df.index.set_names(['lvl1', 'lvl2'])
+ df.to_excel('path_to_file.xlsx')
+ df = pd.read_excel('path_to_file.xlsx', index_col=[0,1])
+ df
+
+ # MultiIndex index and column - with names
+ df.columns = pd.MultiIndex.from_product([['a'],['b', 'd']], names=['c1', 'c2'])
+ df.to_excel('path_to_file.xlsx')
+ df = pd.read_excel('path_to_file.xlsx',
+ index_col=[0,1], header=[0,1])
+ df
+
+.. ipython:: python
+ :suppress:
+
+ import os
+ os.remove('path_to_file.xlsx')
+
+.. warning::
+
+ Excel files saved in version 0.16.2 or prior that had index names will still able to be read in,
+ but the ``has_index_names`` argument must specified to ``True``.
+
.. versionadded:: 0.16
``read_excel`` can read more than one sheet, by setting ``sheetname`` to either
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index f88e5c0a11f9f..5a0e33b193b66 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -205,6 +205,53 @@ The support math functions are `sin`, `cos`, `exp`, `log`, `expm1`, `log1p`,
These functions map to the intrinsics for the NumExpr engine. For Python
engine, they are mapped to NumPy calls.
+Changes to Excel with ``MultiIndex``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+In version 0.16.2 a ``DataFrame`` with ``MultiIndex`` columns could not be written to Excel via ``to_excel``.
+That functionality has been added (:issue:`10564`), along with updating ``read_excel`` so that the data can
+be read back with no loss of information by specifying which columns/rows make up the ``MultiIndex``
+in the ``header`` and ``index_col`` parameters (:issue:`4679`)
+
+See the :ref:`documentation <io.excel>` for more details.
+
+.. ipython:: python
+
+ df = pd.DataFrame([[1,2,3,4], [5,6,7,8]],
+ columns = pd.MultiIndex.from_product([['foo','bar'],['a','b']],
+ names = ['col1', 'col2']),
+ index = pd.MultiIndex.from_product([['j'], ['l', 'k']],
+ names = ['i1', 'i2']))
+
+ df
+ df.to_excel('test.xlsx')
+
+ df = pd.read_excel('test.xlsx', header=[0,1], index_col=[0,1])
+ df
+
+.. ipython:: python
+ :suppress:
+
+ import os
+ os.remove('test.xlsx')
+
+Previously, it was necessary to specify the ``has_index_names`` argument in ``read_excel``
+if the serialized data had index names. For version 0.17 the ouptput format of ``to_excel``
+has been changed to make this keyword unnecessary - the change is shown below.
+
+**Old**
+
+.. image:: _static/old-excel-index.png
+
+**New**
+
+.. image:: _static/new-excel-index.png
+
+.. warning::
+
+ Excel files saved in version 0.16.2 or prior that had index names will still able to be read in,
+ but the ``has_index_names`` argument must specified to ``True``.
+
+
.. _whatsnew_0170.enhancements.other:
Other enhancements
@@ -761,7 +808,6 @@ Changes to ``Categorical.unique``
cat
cat.unique()
-
.. _whatsnew_0170.api_breaking.other:
Other API Changes
@@ -771,7 +817,6 @@ Other API Changes
- Calling the ``.value_counts`` method on a Series with ``categorical`` dtype now returns a Series with a ``CategoricalIndex`` (:issue:`10704`)
- Allow passing `kwargs` to the interpolation methods (:issue:`10378`).
- The metadata properties of subclasses of pandas objects will now be serialized (:issue:`10553`).
-- Allow ``DataFrame`` with ``MultiIndex`` columns to be written to Excel (:issue:`10564`). This was changed in 0.16.2 as the read-back method could not always guarantee perfect fidelity (:issue:`9794`).
- ``groupby`` using ``Categorical`` follows the same rule as ``Categorical.unique`` described above (:issue:`10508`)
- Improved error message when concatenating an empty iterable of dataframes (:issue:`9157`)
- When constructing ``DataFrame`` with an array of ``complex64`` dtype that meant the corresponding column was automatically promoted to the ``complex128`` dtype. Pandas will now preserve the itemsize of the input for complex data (:issue:`10952`)
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 29f1e1efe9f5d..47d0ef37383c4 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -4,7 +4,6 @@
# pylint: disable=W0141
import sys
-import warnings
from pandas.core.base import PandasObject
from pandas.core.common import adjoin, notnull
@@ -1641,14 +1640,11 @@ class ExcelFormatter(object):
inf_rep : string, default `'inf'`
representation for np.inf values (which aren't representable in Excel)
A `'-'` sign will be added in front of -inf.
- verbose: boolean, default True
- If True, warn user that the resulting output file may not be
- re-read or parsed directly by pandas.
"""
def __init__(self, df, na_rep='', float_format=None, cols=None,
header=True, index=True, index_label=None, merge_cells=False,
- inf_rep='inf', verbose=True):
+ inf_rep='inf'):
self.df = df
self.rowcounter = 0
self.na_rep = na_rep
@@ -1661,7 +1657,6 @@ def __init__(self, df, na_rep='', float_format=None, cols=None,
self.header = header
self.merge_cells = merge_cells
self.inf_rep = inf_rep
- self.verbose = verbose
def _format_value(self, val):
if lib.checknull(val):
@@ -1682,10 +1677,6 @@ def _format_header_mi(self):
raise NotImplementedError("Writing to Excel with MultiIndex"
" columns and no index ('index'=False) "
"is not yet implemented.")
- elif self.index and self.verbose:
- warnings.warn("Writing to Excel with MultiIndex columns is a"
- " one way serializable operation. You will not"
- " be able to re-read or parse the output file.")
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if not(has_aliases or self.header):
@@ -1796,18 +1787,14 @@ def _format_regular_rows(self):
else:
index_label = self.df.index.names[0]
+ if isinstance(self.columns, MultiIndex):
+ self.rowcounter += 1
+
if index_label and self.header is not False:
- if self.merge_cells:
- yield ExcelCell(self.rowcounter,
- 0,
- index_label,
- header_style)
- self.rowcounter += 1
- else:
- yield ExcelCell(self.rowcounter - 1,
- 0,
- index_label,
- header_style)
+ yield ExcelCell(self.rowcounter - 1,
+ 0,
+ index_label,
+ header_style)
# write index_values
index_values = self.df.index
@@ -1841,19 +1828,21 @@ def _format_hierarchical_rows(self):
(list, tuple, np.ndarray, Index)):
index_labels = self.index_label
+ # MultiIndex columns require an extra row
+ # with index names (blank if None) for
+ # unambigous round-trip
+ if isinstance(self.columns, MultiIndex):
+ self.rowcounter += 1
+
# if index labels are not empty go ahead and dump
if (any(x is not None for x in index_labels)
and self.header is not False):
- if not self.merge_cells:
- self.rowcounter -= 1
-
for cidx, name in enumerate(index_labels):
- yield ExcelCell(self.rowcounter,
+ yield ExcelCell(self.rowcounter - 1,
cidx,
name,
header_style)
- self.rowcounter += 1
if self.merge_cells:
# Format hierarchical rows as merged cells.
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cb237b93c70ba..0e8bdbccb53fb 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1336,9 +1336,6 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
inf_rep : string, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel)
- verbose: boolean, default True
- If True, warn user that the resulting output file may not be
- re-read or parsed directly by pandas.
Notes
-----
@@ -1371,7 +1368,7 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
index=index,
index_label=index_label,
merge_cells=merge_cells,
- inf_rep=inf_rep, verbose=verbose)
+ inf_rep=inf_rep)
formatted_cells = formatter.get_formatted_cells()
excel_writer.write_cells(formatted_cells, sheet_name,
startrow=startrow, startcol=startcol)
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index d5258cb32e6e0..b113cbf057f39 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -18,6 +18,7 @@
BytesIO, string_types)
from pandas.core import config
from pandas.core.common import pprint_thing
+from pandas.util.decorators import Appender
import pandas.compat as compat
import pandas.compat.openpyxl_compat as openpyxl_compat
import pandas.core.common as com
@@ -68,15 +69,11 @@ def get_writer(engine_name):
raise ValueError("No Excel writer '%s'" % engine_name)
-def read_excel(io, sheetname=0, **kwds):
- """Read an Excel table into a pandas DataFrame
+excel_doc_common = """
+ Read an Excel table into a pandas DataFrame
Parameters
- ----------
- io : string, file-like object, or xlrd workbook.
- The string could be a URL. Valid URL schemes include http, ftp, s3,
- and file. For file URLs, a host is expected. For instance, a local
- file could be file://localhost/path/to/workbook.xlsx
+ ----------%(io)s
sheetname : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names, Integers are used in zero-indexed sheet
@@ -97,20 +94,23 @@ def read_excel(io, sheetname=0, **kwds):
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
* None -> All sheets as a dictionary of DataFrames
- header : int, default 0
- Row to use for the column labels of the parsed DataFrame
+ header : int, list of ints, default 0
+ Row (0-indexed) to use for the column labels of the parsed
+ DataFrame. If a list of integers is passed those row positions will
+ be combined into a ``MultiIndex``
skiprows : list-like
Rows to skip at the beginning (0-indexed)
skip_footer : int, default 0
Rows at the end to skip (0-indexed)
+ index_col : int, list of ints, default None
+ Column (0-indexed) to use as the row labels of the DataFrame.
+ Pass None if there is no such column. If a list is passed,
+ those columns will be combined into a ``MultiIndex``
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
- index_col : int, default None
- Column to use as the row labels of the DataFrame. Pass None if
- there is no such column
parse_cols : int or list, default None
* If None then parse all columns,
* If int then indicates last column to be parsed
@@ -119,22 +119,21 @@ def read_excel(io, sheetname=0, **kwds):
column ranges (e.g. "A:E" or "A,C,E:F")
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
+ thousands : str, default None
+ Thousands separator
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
verbose : boolean, default False
- Indicate number of NA values placed in non-numeric columns
- engine: string, default None
- If io is not a buffer or path, this must be set to identify io.
- Acceptable values are None or xlrd
+ Indicate number of NA values placed in non-numeric columns%(eng)s
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally
- has_index_names : boolean, default False
- True if the cols defined in index_col have an index name and are
- not in the header. Index name will be placed on a separate line below
- the header.
+ has_index_names : boolean, default None
+ DEPCRECATED: for version 0.17+ index names will be automatically inferred
+ based on index_col. To read Excel output from 0.16.2 and prior that
+ had saved index names, use True.
Returns
-------
@@ -143,6 +142,19 @@ def read_excel(io, sheetname=0, **kwds):
for more information on when a Dict of Dataframes is returned.
"""
+read_excel_kwargs = dict()
+read_excel_kwargs['io'] = """
+ io : string, file-like object, or xlrd workbook.
+ The string could be a URL. Valid URL schemes include http, ftp, s3,
+ and file. For file URLs, a host is expected. For instance, a local
+ file could be file://localhost/path/to/workbook.xlsx"""
+read_excel_kwargs['eng'] = """
+ engine: string, default None
+ If io is not a buffer or path, this must be set to identify io.
+ Acceptable values are None or xlrd"""
+
+@Appender(excel_doc_common % read_excel_kwargs)
+def read_excel(io, sheetname=0, **kwds):
engine = kwds.pop('engine', None)
return ExcelFile(io, engine=engine).parse(sheetname=sheetname, **kwds)
@@ -193,83 +205,23 @@ def __init__(self, io, **kwds):
raise ValueError('Must explicitly set engine if not passing in'
' buffer or path for io.')
+ @Appender(excel_doc_common % dict(io='', eng=''))
def parse(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
index_col=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None, chunksize=None,
- convert_float=True, has_index_names=False, converters=None, **kwds):
- """Read an Excel table into DataFrame
+ convert_float=True, has_index_names=None, converters=None, **kwds):
- Parameters
- ----------
- sheetname : string, int, mixed list of strings/ints, or None, default 0
-
- Strings are used for sheet names, Integers are used in zero-indexed sheet
- positions.
-
- Lists of strings/integers are used to request multiple sheets.
-
- Specify None to get all sheets.
-
- str|int -> DataFrame is returned.
- list|None -> Dict of DataFrames is returned, with keys representing sheets.
-
- Available Cases
-
- * Defaults to 0 -> 1st sheet as a DataFrame
- * 1 -> 2nd sheet as a DataFrame
- * "Sheet1" -> 1st sheet as a DataFrame
- * [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
- * None -> All sheets as a dictionary of DataFrames
- header : int, default 0
- Row to use for the column labels of the parsed DataFrame
- skiprows : list-like
- Rows to skip at the beginning (0-indexed)
- skip_footer : int, default 0
- Rows at the end to skip (0-indexed)
- converters : dict, default None
- Dict of functions for converting values in certain columns. Keys can
- either be integers or column labels
- index_col : int, default None
- Column to use as the row labels of the DataFrame. Pass None if
- there is no such column
- parse_cols : int or list, default None
- * If None then parse all columns
- * If int then indicates last column to be parsed
- * If list of ints then indicates list of column numbers to be
- parsed
- * If string then indicates comma separated list of column names and
- column ranges (e.g. "A:E" or "A,C,E:F")
- parse_dates : boolean, default False
- Parse date Excel values,
- date_parser : function default None
- Date parsing function
- na_values : list-like, default None
- List of additional strings to recognize as NA/NaN
- thousands : str, default None
- Thousands separator
- chunksize : int, default None
- Size of file chunk to read for lazy evaluation.
- convert_float : boolean, default True
- convert integral floats to int (i.e., 1.0 --> 1). If False, all
- numeric data will be read in as floats: Excel stores all numbers as
- floats internally.
- has_index_names : boolean, default False
- True if the cols defined in index_col have an index name and are
- not in the header
- verbose : boolean, default False
- Set to True to print a single statement when reading each
- excel sheet.
-
- Returns
- -------
- parsed : DataFrame or Dict of DataFrames
- DataFrame from the passed in Excel file. See notes in sheetname argument
- for more information on when a Dict of Dataframes is returned.
- """
skipfooter = kwds.pop('skipfooter', None)
if skipfooter is not None:
skip_footer = skipfooter
+ if has_index_names is not None:
+ warn("\nThe has_index_names argument is deprecated; index names "
+ "will be automatically inferred based on index_col.\n"
+ "This argmument is still necessary if reading Excel output "
+ "from 0.16.2 or prior with index names.", FutureWarning,
+ stacklevel=3)
+
return self._parse_excel(sheetname=sheetname, header=header,
skiprows=skiprows,
index_col=index_col,
@@ -418,8 +370,40 @@ def _parse_cell(cell_contents,cell_typ):
if sheet.nrows == 0:
return DataFrame()
+ if com.is_list_like(header) and len(header) == 1:
+ header = header[0]
+
+ # forward fill and pull out names for MultiIndex column
+ header_names = None
if header is not None:
- data[header] = _trim_excel_header(data[header])
+ if com.is_list_like(header):
+ header_names = []
+ for row in header:
+ if com.is_integer(skiprows):
+ row += skiprows
+ data[row] = _fill_mi_header(data[row])
+ header_name, data[row] = _pop_header_name(data[row], index_col)
+ header_names.append(header_name)
+ else:
+ data[header] = _trim_excel_header(data[header])
+
+ if com.is_list_like(index_col):
+ # forward fill values for MultiIndex index
+ if not com.is_list_like(header):
+ offset = 1 + header
+ else:
+ offset = 1 + max(header)
+
+ for col in index_col:
+ last = data[offset][col]
+ for row in range(offset + 1, len(data)):
+ if data[row][col] == '' or data[row][col] is None:
+ data[row][col] = last
+ else:
+ last = data[row][col]
+
+ if com.is_list_like(header) and len(header) > 1:
+ has_index_names = True
parser = TextParser(data, header=header, index_col=index_col,
has_index_names=has_index_names,
@@ -433,6 +417,7 @@ def _parse_cell(cell_contents,cell_typ):
**kwds)
output[asheetname] = parser.read()
+ output[asheetname].columns = output[asheetname].columns.set_names(header_names)
if ret_dict:
return output
@@ -463,6 +448,29 @@ def _trim_excel_header(row):
row = row[1:]
return row
+def _fill_mi_header(row):
+ # forward fill blanks entries
+ # from headers if parsing as MultiIndex
+ last = row[0]
+ for i in range(1, len(row)):
+ if row[i] == '' or row[i] is None:
+ row[i] = last
+ else:
+ last = row[i]
+ return row
+
+# fill blank if index_col not None
+def _pop_header_name(row, index_col):
+ """ (header, new_data) for header rows in MultiIndex parsing"""
+ none_fill = lambda x: None if x == '' else x
+
+ if index_col is None:
+ # no index col specified, trim data for inference path
+ return none_fill(row[0]), row[1:]
+ else:
+ # pop out header name and fill w/ blank
+ i = index_col if not com.is_list_like(index_col) else max(index_col)
+ return none_fill(row[i]), row[:i] + [''] + row[i+1:]
def _conv_value(val):
# Convert numpy types to Python types for the Excel writers.
diff --git a/pandas/io/tests/data/test_index_name_pre17.xls b/pandas/io/tests/data/test_index_name_pre17.xls
new file mode 100644
index 0000000000000..2ab13105e7925
Binary files /dev/null and b/pandas/io/tests/data/test_index_name_pre17.xls differ
diff --git a/pandas/io/tests/data/test_index_name_pre17.xlsm b/pandas/io/tests/data/test_index_name_pre17.xlsm
new file mode 100644
index 0000000000000..33c0d7949531c
Binary files /dev/null and b/pandas/io/tests/data/test_index_name_pre17.xlsm differ
diff --git a/pandas/io/tests/data/test_index_name_pre17.xlsx b/pandas/io/tests/data/test_index_name_pre17.xlsx
new file mode 100644
index 0000000000000..ce66c40cda141
Binary files /dev/null and b/pandas/io/tests/data/test_index_name_pre17.xlsx differ
diff --git a/pandas/io/tests/data/testmultiindex.xls b/pandas/io/tests/data/testmultiindex.xls
new file mode 100644
index 0000000000000..3664c5c8dedcc
Binary files /dev/null and b/pandas/io/tests/data/testmultiindex.xls differ
diff --git a/pandas/io/tests/data/testmultiindex.xlsm b/pandas/io/tests/data/testmultiindex.xlsm
new file mode 100644
index 0000000000000..8f359782b57bb
Binary files /dev/null and b/pandas/io/tests/data/testmultiindex.xlsm differ
diff --git a/pandas/io/tests/data/testmultiindex.xlsx b/pandas/io/tests/data/testmultiindex.xlsx
new file mode 100644
index 0000000000000..a70110caf1ec7
Binary files /dev/null and b/pandas/io/tests/data/testmultiindex.xlsx differ
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 073fc55357df7..0aee2af6ad166 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -14,6 +14,7 @@
import numpy as np
from numpy.testing.decorators import slow
+import pandas as pd
from pandas import DataFrame, Index, MultiIndex
from pandas.io.parsers import read_csv
from pandas.io.excel import (
@@ -21,7 +22,7 @@
_Openpyxl2Writer, register_writer, _XlsxWriter
)
from pandas.io.common import URLError
-from pandas.util.testing import ensure_clean
+from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
from pandas.core.config import set_option, get_option
import pandas.util.testing as tm
@@ -415,11 +416,8 @@ def test_read_xlrd_Book(self):
@tm.network
def test_read_from_http_url(self):
- # TODO: remove this when merging into master
- url = ('https://raw.github.com/davidovitch/pandas/master/'
+ url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/test1' + self.ext)
-# url = ('https://raw.github.com/pydata/pandas/master/'
-# 'pandas/io/tests/data/test' + self.ext)
url_table = read_excel(url)
local_table = self.get_exceldf('test1')
tm.assert_frame_equal(url_table, local_table)
@@ -518,6 +516,132 @@ def test_reader_seconds(self):
actual = self.get_exceldf('times_1904', 'Sheet1')
tm.assert_frame_equal(actual, expected)
+ def test_read_excel_multiindex(self):
+ #GH 4679
+ mi = MultiIndex.from_product([['foo','bar'],['a','b']])
+ mi_file = os.path.join(self.dirpath, 'testmultiindex' + self.ext)
+
+ expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
+ [2, 3.5, pd.Timestamp('2015-01-02'), False],
+ [3, 4.5, pd.Timestamp('2015-01-03'), False],
+ [4, 5.5, pd.Timestamp('2015-01-04'), True]],
+ columns = mi)
+
+ actual = read_excel(mi_file, 'mi_column', header=[0,1])
+ tm.assert_frame_equal(actual, expected)
+ actual = read_excel(mi_file, 'mi_column', header=[0,1], index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ expected.columns = ['a', 'b', 'c', 'd']
+ expected.index = mi
+ actual = read_excel(mi_file, 'mi_index', index_col=[0,1])
+ tm.assert_frame_equal(actual, expected, check_names=False)
+
+ expected.columns = mi
+ actual = read_excel(mi_file, 'both', index_col=[0,1], header=[0,1])
+ tm.assert_frame_equal(actual, expected, check_names=False)
+
+ expected.index = mi.set_names(['ilvl1', 'ilvl2'])
+ expected.columns = ['a', 'b', 'c', 'd']
+ actual = read_excel(mi_file, 'mi_index_name', index_col=[0,1])
+ tm.assert_frame_equal(actual, expected)
+
+ expected.index = list(range(4))
+ expected.columns = mi.set_names(['c1', 'c2'])
+ actual = read_excel(mi_file, 'mi_column_name', header=[0,1], index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ expected.index = mi.set_names(['ilvl1', 'ilvl2'])
+ actual = read_excel(mi_file, 'both_name', index_col=[0,1], header=[0,1])
+ tm.assert_frame_equal(actual, expected)
+
+ actual = read_excel(mi_file, 'both_name', index_col=[0,1], header=[0,1])
+ tm.assert_frame_equal(actual, expected)
+
+ actual = read_excel(mi_file, 'both_name_skiprows', index_col=[0,1],
+ header=[0,1], skiprows=2)
+ tm.assert_frame_equal(actual, expected)
+
+
+ def test_excel_multindex_roundtrip(self):
+ #GH 4679
+ _skip_if_no_xlsxwriter()
+ with ensure_clean('.xlsx') as pth:
+ for c_idx_names in [True, False]:
+ for r_idx_names in [True, False]:
+ for c_idx_levels in [1, 3]:
+ for r_idx_levels in [1, 3]:
+ # column index name can't be serialized unless MultiIndex
+ if (c_idx_levels == 1 and c_idx_names):
+ continue
+
+ # empty name case current read in as unamed levels, not Nones
+ check_names = True
+ if not r_idx_names and r_idx_levels > 1:
+ check_names = False
+
+ df = mkdf(5, 5, c_idx_names,
+ r_idx_names, c_idx_levels,
+ r_idx_levels)
+ df.to_excel(pth)
+ act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(df, act, check_names=check_names)
+
+ df.iloc[0, :] = np.nan
+ df.to_excel(pth)
+ act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(df, act, check_names=check_names)
+
+ df.iloc[-1, :] = np.nan
+ df.to_excel(pth)
+ act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(df, act, check_names=check_names)
+
+ def test_excel_oldindex_format(self):
+ #GH 4679
+ data = np.array([['R0C0', 'R0C1', 'R0C2', 'R0C3', 'R0C4'],
+ ['R1C0', 'R1C1', 'R1C2', 'R1C3', 'R1C4'],
+ ['R2C0', 'R2C1', 'R2C2', 'R2C3', 'R2C4'],
+ ['R3C0', 'R3C1', 'R3C2', 'R3C3', 'R3C4'],
+ ['R4C0', 'R4C1', 'R4C2', 'R4C3', 'R4C4']])
+ columns = ['C_l0_g0', 'C_l0_g1', 'C_l0_g2', 'C_l0_g3', 'C_l0_g4']
+ mi = MultiIndex(levels=[['R_l0_g0', 'R_l0_g1', 'R_l0_g2', 'R_l0_g3', 'R_l0_g4'],
+ ['R_l1_g0', 'R_l1_g1', 'R_l1_g2', 'R_l1_g3', 'R_l1_g4']],
+ labels=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
+ names=['R0', 'R1'])
+ si = Index(['R_l0_g0', 'R_l0_g1', 'R_l0_g2', 'R_l0_g3', 'R_l0_g4'], name='R0')
+
+ in_file = os.path.join(self.dirpath, 'test_index_name_pre17' + self.ext)
+
+ expected = pd.DataFrame(data, index=si, columns=columns)
+ with tm.assert_produces_warning(FutureWarning):
+ actual = pd.read_excel(in_file, 'single_names', has_index_names=True)
+ tm.assert_frame_equal(actual, expected)
+
+ expected.index.name = None
+ actual = pd.read_excel(in_file, 'single_no_names')
+ tm.assert_frame_equal(actual, expected)
+ with tm.assert_produces_warning(FutureWarning):
+ actual = pd.read_excel(in_file, 'single_no_names', has_index_names=False)
+ tm.assert_frame_equal(actual, expected)
+
+ expected.index = mi
+ with tm.assert_produces_warning(FutureWarning):
+ actual = pd.read_excel(in_file, 'multi_names', has_index_names=True)
+ tm.assert_frame_equal(actual, expected)
+
+ expected.index.names = [None, None]
+ actual = pd.read_excel(in_file, 'multi_no_names', index_col=[0,1])
+ tm.assert_frame_equal(actual, expected, check_names=False)
+ with tm.assert_produces_warning(FutureWarning):
+ actual = pd.read_excel(in_file, 'multi_no_names', index_col=[0,1],
+ has_index_names=False)
+ tm.assert_frame_equal(actual, expected, check_names=False)
+
+
class XlsReaderTests(XlrdTests, tm.TestCase):
ext = '.xls'
@@ -537,6 +661,8 @@ class XlsmReaderTests(XlrdTests, tm.TestCase):
check_skip = staticmethod(_skip_if_no_xlrd)
+
+
class ExcelWriterBase(SharedItems):
# Base class for test cases to run with different Excel writers.
# To add a writer test, define the following:
@@ -781,7 +907,6 @@ def test_roundtrip_indexlabels(self):
reader = ExcelFile(path)
recons = reader.parse('test1',
index_col=0,
- has_index_names=self.merge_cells
).astype(np.int64)
frame.index.names = ['test']
self.assertEqual(frame.index.names, recons.index.names)
@@ -794,7 +919,6 @@ def test_roundtrip_indexlabels(self):
reader = ExcelFile(path)
recons = reader.parse('test1',
index_col=0,
- has_index_names=self.merge_cells
).astype(np.int64)
frame.index.names = ['test']
self.assertEqual(frame.index.names, recons.index.names)
@@ -807,7 +931,6 @@ def test_roundtrip_indexlabels(self):
reader = ExcelFile(path)
recons = reader.parse('test1',
index_col=0,
- has_index_names=self.merge_cells
).astype(np.int64)
frame.index.names = ['test']
tm.assert_frame_equal(frame, recons.astype(bool))
@@ -837,8 +960,7 @@ def test_excel_roundtrip_indexname(self):
xf = ExcelFile(path)
result = xf.parse(xf.sheet_names[0],
- index_col=0,
- has_index_names=self.merge_cells)
+ index_col=0)
tm.assert_frame_equal(result, df)
self.assertEqual(result.index.name, 'foo')
@@ -925,8 +1047,7 @@ def test_to_excel_multiindex(self):
frame.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
df = reader.parse('test1', index_col=[0, 1],
- parse_dates=False,
- has_index_names=self.merge_cells)
+ parse_dates=False)
tm.assert_frame_equal(frame, df)
self.assertEqual(frame.index.names, df.index.names)
@@ -943,8 +1064,7 @@ def test_to_excel_multiindex_dates(self):
tsframe.to_excel(path, 'test1', merge_cells=self.merge_cells)
reader = ExcelFile(path)
recons = reader.parse('test1',
- index_col=[0, 1],
- has_index_names=self.merge_cells)
+ index_col=[0, 1])
tm.assert_frame_equal(tsframe, recons)
self.assertEqual(recons.index.names, ('time', 'foo'))
@@ -1475,15 +1595,14 @@ def test_excel_raise_error_on_multiindex_columns_and_no_index(self):
with ensure_clean(self.ext) as path:
df.to_excel(path, index=False)
- def test_excel_warns_verbosely_on_multiindex_columns_and_index_true(self):
+ def test_excel_multiindex_columns_and_index_true(self):
_skip_if_no_xlwt()
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
- df = DataFrame(np.random.randn(10, 3), columns=cols)
- with tm.assert_produces_warning(UserWarning):
- with ensure_clean(self.ext) as path:
- df.to_excel(path, index=True)
+ df = pd.DataFrame(np.random.randn(10, 3), columns=cols)
+ with ensure_clean(self.ext) as path:
+ df.to_excel(path, index=True)
def test_excel_multiindex_index(self):
_skip_if_no_xlwt()
| closes #4679
xref #10564
Output of `to_excel` should now be fully round-trippable with `read_excel` with the
right combination of `index_col` and `header`.
To make the semantics match `read_csv`, an index column name (`has_index_names=True`) is
always assumed if something is passed to `index_col` - this should be non-breaking;
if there are no names, it will be just filled to `None` as before.
```
In [7]: df = pd.DataFrame([[1,2,3,4], [5,6,7,8]],
...: columns = pd.MultiIndex.from_product([['foo','bar'],['a','b']],
...: names = ['col1', 'col2']),
...: index = pd.MultiIndex.from_product([['j'], ['l', 'k']],
...: names = ['i1', 'i2']))
In [8]: df
Out[8]:
col1 foo bar
col2 a b a b
i1 i2
j l 1 2 3 4
k 5 6 7 8
In [9]: df.to_excel('test.xlsx')
In [10]: df = pd.read_excel('test.xlsx', header=[0,1], index_col=[0,1])
In [11]: df
Out[11]:
col1 foo bar
col2 a b a b
i1 i2
j l 1 2 3 4
k 5 6 7 8
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10967 | 2015-09-02T00:25:28Z | 2015-09-09T12:06:09Z | 2015-09-09T12:06:09Z | 2016-09-15T13:39:47Z |
DOC: Add note regarding 0-len string to default NA values in IO docs | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 70e7154493ccf..ded314229225c 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -731,7 +731,9 @@ the corresponding equivalent values will also imply a missing value (in this cas
To completely override the default values that are recognized as missing, specify ``keep_default_na=False``.
The default ``NaN`` recognized values are ``['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A','N/A', 'NA',
-'#NA', 'NULL', 'NaN', '-NaN', 'nan', '-nan']``.
+'#NA', 'NULL', 'NaN', '-NaN', 'nan', '-nan']``. Although a 0-length string
+``''`` is not included in the default ``NaN`` values list, it is still treated
+as a missing value.
.. code-block:: python
| Fixes https://github.com/pydata/pandas/issues/10700
| https://api.github.com/repos/pandas-dev/pandas/pulls/10965 | 2015-09-01T20:49:25Z | 2015-09-02T08:52:40Z | 2015-09-02T08:52:40Z | 2015-09-02T08:52:40Z |
BUG: DataFrame subplot with duplicated columns output incorrect result | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 3eed3f7ddada2..86c8aa488a7df 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -922,3 +922,4 @@ Bug Fixes
- Bug in ``groupby`` incorrect computation for aggregation on ``DataFrame`` with ``NaT`` (E.g ``first``, ``last``, ``min``). (:issue:`10590`)
- Bug when constructing ``DataFrame`` where passing a dictionary with only scalar values and specifying columns did not raise an error (:issue:`10856`)
- Bug in ``.var()`` causing roundoff errors for highly similar values (:issue:`10242`)
+- Bug in ``DataFrame.plot(subplots=True)`` with duplicated columns outputs incorrect result (:issue:`10962`)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 71fd85bde1235..d1f1f2196558a 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -1646,6 +1646,28 @@ def test_subplots_sharex_axes_existing_axes(self):
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
+ @slow
+ def test_subplots_dup_columns(self):
+ # GH 10962
+ df = DataFrame(np.random.rand(5, 5), columns=list('aaaaa'))
+ axes = df.plot(subplots=True)
+ for ax in axes:
+ self._check_legend_labels(ax, labels=['a'])
+ self.assertEqual(len(ax.lines), 1)
+ tm.close()
+
+ axes = df.plot(subplots=True, secondary_y='a')
+ for ax in axes:
+ # (right) is only attached when subplots=False
+ self._check_legend_labels(ax, labels=['a'])
+ self.assertEqual(len(ax.lines), 1)
+ tm.close()
+
+ ax = df.plot(secondary_y='a')
+ self._check_legend_labels(ax, labels=['a (right)'] * 5)
+ self.assertEqual(len(ax.lines), 0)
+ self.assertEqual(len(ax.right_ax.lines), 5)
+
def test_negative_log(self):
df = - DataFrame(rand(6, 4),
index=list(string.ascii_letters[:6]),
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index e0d13287fcf3b..9eab385a7a2a5 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -920,11 +920,11 @@ def _iter_data(self, data=None, keep_index=False, fillna=None):
else:
columns = data.columns
- for col in columns:
+ for col, values in data.iteritems():
if keep_index is True:
- yield col, data[col]
+ yield col, values
else:
- yield col, data[col].values
+ yield col, values.values
@property
def nseries(self):
| When `DataFrame` has duplicated column name, each subplot will contain all the lines with same name.
```
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.rand(5, 5), columns=list('aaaaa'))
df.plot(subplots=True)
```

| https://api.github.com/repos/pandas-dev/pandas/pulls/10962 | 2015-09-01T15:52:12Z | 2015-09-05T02:14:54Z | 2015-09-05T02:14:54Z | 2015-09-05T02:14:56Z |
BUG: Fixed bug that Timedelta raises error when slicing from 0s (issue #10583) | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index eae33bc80be32..fc5777ddea3f1 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -821,6 +821,7 @@ Bug Fixes
- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
- Bug in ``pd.DataFrame.diff`` when DataFrame is not consolidated (:issue:`10907`)
- Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue:`9431`)
+- Bug in ``Timedelta`` raising error when slicing from 0s (:issue:`10583`)
- Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`)
- Bug in ``Series([np.nan]).astype('M8[ms]')``, which now returns ``Series([pd.NaT])`` (:issue:`10747`)
- Bug in ``PeriodIndex.order`` reset freq (:issue:`10295`)
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 4870fbd55f33e..97e7f883542cc 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -404,6 +404,14 @@ def test_timedelta_range(self):
result = timedelta_range('0 days',freq='30T',periods=50)
tm.assert_index_equal(result, expected)
+ # issue10583
+ df = pd.DataFrame(np.random.normal(size=(10,4)))
+ df.index = pd.timedelta_range(start='0s', periods=10, freq='s')
+ expected = df.loc[pd.Timedelta('0s'):,:]
+ result = df.loc['0s':,:]
+ assert_frame_equal(expected, result)
+
+
def test_numeric_conversions(self):
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 369993b4c54d1..77ac362181a2b 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -2265,9 +2265,8 @@ class Timedelta(_Timedelta):
return "m"
elif self._h:
return "h"
- elif self._d:
+ else:
return "D"
- raise ValueError("invalid resolution")
def round(self, reso):
"""
| closes #10583
| https://api.github.com/repos/pandas-dev/pandas/pulls/10960 | 2015-09-01T12:46:37Z | 2015-09-01T19:38:53Z | 2015-09-01T19:38:53Z | 2015-09-01T19:39:57Z |
TST: Changed pythonxs link to alternative link and mofidifed test_html (issue: 10906) | diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index ebdfbe5af299d..fb7ffbc6dd621 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -358,20 +358,16 @@ def test_negative_skiprows(self):
@network
def test_multiple_matches(self):
- raise nose.SkipTest("pythonxy link seems to have changed")
-
- url = 'http://code.google.com/p/pythonxy/wiki/StandardPlugins'
- dfs = self.read_html(url, match='Python', attrs={'class': 'wikitable'})
+ url = 'https://docs.python.org/2/'
+ dfs = self.read_html(url, match='Python')
self.assertTrue(len(dfs) > 1)
@network
- def test_pythonxy_plugins_table(self):
- raise nose.SkipTest("pythonxy link seems to have changed")
-
- url = 'http://code.google.com/p/pythonxy/wiki/StandardPlugins'
- dfs = self.read_html(url, match='Python', attrs={'class': 'wikitable'})
- zz = [df.iloc[0, 0] for df in dfs]
- self.assertEqual(sorted(zz), sorted(['Python', 'SciTE']))
+ def test_python_docs_table(self):
+ url = 'https://docs.python.org/2/'
+ dfs = self.read_html(url, match='Python')
+ zz = [df.iloc[0, 0][0:4] for df in dfs]
+ self.assertEqual(sorted(zz), sorted(['Repo', 'What']))
@slow
def test_thousands_macau_stats(self):
| See issue #10906
| https://api.github.com/repos/pandas-dev/pandas/pulls/10958 | 2015-09-01T02:53:10Z | 2015-09-01T11:03:07Z | 2015-09-01T11:03:07Z | 2015-09-01T13:23:33Z |
Add support for math functions in eval() | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 749512890d86b..d52c1d55a6c66 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -38,6 +38,7 @@ Highlights include:
- Development installed versions of pandas will now have ``PEP440`` compliant version strings (:issue:`9518`)
- Support for reading SAS xport files, see :ref:`here <whatsnew_0170.enhancements.sas_xport>`
- Removal of the automatic TimeSeries broadcasting, deprecated since 0.8.0, see :ref:`here <whatsnew_0170.prior_deprecations>`
+- Support for math functions in .eval(), see :ref:`here <whatsnew_0170.matheval>`
Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsnew_0170.deprecations>` before updating.
@@ -123,6 +124,25 @@ incrementally.
See the :ref:`docs <io.sas>` for more details.
+.. _whatsnew_0170.matheval:
+
+Support for Math Functions in .eval()
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:meth:`~pandas.eval` now supports calling math functions.
+
+.. code-block:: python
+
+ df = pd.DataFrame({'a': np.random.randn(10)})
+ df.eval("b = sin(a)")
+
+The support math functions are `sin`, `cos`, `exp`, `log`, `expm1`, `log1p`,
+`sqrt`, `sinh`, `cosh`, `tanh`, `arcsin`, `arccos`, `arctan`, `arccosh`,
+`arcsinh`, `arctanh`, `abs` and `arctan2`.
+
+These functions map to the intrinsics for the NumExpr engine. For Python
+engine, they are mapped to NumPy calls.
+
.. _whatsnew_0170.enhancements.other:
Other enhancements
diff --git a/pandas/computation/expr.py b/pandas/computation/expr.py
index b6a1fcbec8339..123051d802d7d 100644
--- a/pandas/computation/expr.py
+++ b/pandas/computation/expr.py
@@ -20,7 +20,7 @@
_arith_ops_syms, _unary_ops_syms, is_term)
from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG
from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
-from pandas.computation.ops import UndefinedVariableError
+from pandas.computation.ops import UndefinedVariableError, FuncNode
from pandas.computation.scope import Scope, _ensure_scope
@@ -524,27 +524,48 @@ def visit_Call(self, node, side=None, **kwargs):
elif not isinstance(node.func, ast.Name):
raise TypeError("Only named functions are supported")
else:
- res = self.visit(node.func)
+ try:
+ res = self.visit(node.func)
+ except UndefinedVariableError:
+ # Check if this is a supported function name
+ try:
+ res = FuncNode(node.func.id)
+ except ValueError:
+ # Raise original error
+ raise
if res is None:
raise ValueError("Invalid function call {0}".format(node.func.id))
if hasattr(res, 'value'):
res = res.value
- args = [self.visit(targ).value for targ in node.args]
- if node.starargs is not None:
- args += self.visit(node.starargs).value
+ if isinstance(res, FuncNode):
+ args = [self.visit(targ) for targ in node.args]
+
+ if node.starargs is not None:
+ args += self.visit(node.starargs)
+
+ if node.keywords or node.kwargs:
+ raise TypeError("Function \"{0}\" does not support keyword "
+ "arguments".format(res.name))
+
+ return res(*args, **kwargs)
+
+ else:
+ args = [self.visit(targ).value for targ in node.args]
+ if node.starargs is not None:
+ args += self.visit(node.starargs).value
- keywords = {}
- for key in node.keywords:
- if not isinstance(key, ast.keyword):
- raise ValueError("keyword error in function call "
- "'{0}'".format(node.func.id))
- keywords[key.arg] = self.visit(key.value).value
- if node.kwargs is not None:
- keywords.update(self.visit(node.kwargs).value)
+ keywords = {}
+ for key in node.keywords:
+ if not isinstance(key, ast.keyword):
+ raise ValueError("keyword error in function call "
+ "'{0}'".format(node.func.id))
+ keywords[key.arg] = self.visit(key.value).value
+ if node.kwargs is not None:
+ keywords.update(self.visit(node.kwargs).value)
- return self.const_type(res(*args, **keywords), self.env)
+ return self.const_type(res(*args, **keywords), self.env)
def translate_In(self, op):
return op
@@ -587,7 +608,7 @@ def visitor(x, y):
return reduce(visitor, operands)
-_python_not_supported = frozenset(['Dict', 'Call', 'BoolOp', 'In', 'NotIn'])
+_python_not_supported = frozenset(['Dict', 'BoolOp', 'In', 'NotIn'])
_numexpr_supported_calls = frozenset(_reductions + _mathops)
diff --git a/pandas/computation/ops.py b/pandas/computation/ops.py
index 9df9975b4b61c..f6d5f171036ea 100644
--- a/pandas/computation/ops.py
+++ b/pandas/computation/ops.py
@@ -16,9 +16,12 @@
_reductions = 'sum', 'prod'
-_mathops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p', 'pow', 'div', 'sqrt',
- 'inv', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos', 'arctan',
- 'arccosh', 'arcsinh', 'arctanh', 'arctan2', 'abs')
+
+_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
+ 'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
+ 'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs')
+_binary_math_ops = ('arctan2',)
+_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
@@ -498,3 +501,28 @@ def return_type(self):
(operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
return np.dtype('bool')
return np.dtype('int')
+
+
+class MathCall(Op):
+ def __init__(self, func, args):
+ super(MathCall, self).__init__(func.name, args)
+ self.func = func
+
+ def __call__(self, env):
+ operands = [op(env) for op in self.operands]
+ return self.func.func(*operands)
+
+ def __unicode__(self):
+ operands = map(str, self.operands)
+ return com.pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
+
+
+class FuncNode(object):
+ def __init__(self, name):
+ if name not in _mathops:
+ raise ValueError("\"{0}\" is not a supported function".format(name))
+ self.name = name
+ self.func = getattr(np, name)
+
+ def __call__(self, *args):
+ return MathCall(self, args)
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index 4f998319d922d..8db0b82f1aa2e 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -23,7 +23,8 @@
from pandas.computation.expr import PythonExprVisitor, PandasExprVisitor
from pandas.computation.ops import (_binary_ops_dict,
_special_case_arith_ops_syms,
- _arith_ops_syms, _bool_ops_syms)
+ _arith_ops_syms, _bool_ops_syms,
+ _unary_math_ops, _binary_math_ops)
import pandas.computation.expr as expr
import pandas.util.testing as tm
@@ -1439,6 +1440,129 @@ def setUpClass(cls):
cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms
+class TestMathPythonPython(tm.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ super(TestMathPythonPython, cls).setUpClass()
+ tm.skip_if_no_ne()
+ cls.engine = 'python'
+ cls.parser = 'pandas'
+ cls.unary_fns = _unary_math_ops
+ cls.binary_fns = _binary_math_ops
+
+ @classmethod
+ def tearDownClass(cls):
+ del cls.engine, cls.parser
+
+ def eval(self, *args, **kwargs):
+ kwargs['engine'] = self.engine
+ kwargs['parser'] = self.parser
+ kwargs['level'] = kwargs.pop('level', 0) + 1
+ return pd.eval(*args, **kwargs)
+
+ def test_unary_functions(self):
+ df = DataFrame({'a': np.random.randn(10)})
+ a = df.a
+ for fn in self.unary_fns:
+ expr = "{0}(a)".format(fn)
+ got = self.eval(expr)
+ expect = getattr(np, fn)(a)
+ pd.util.testing.assert_almost_equal(got, expect)
+
+ def test_binary_functions(self):
+ df = DataFrame({'a': np.random.randn(10),
+ 'b': np.random.randn(10)})
+ a = df.a
+ b = df.b
+ for fn in self.binary_fns:
+ expr = "{0}(a, b)".format(fn)
+ got = self.eval(expr)
+ expect = getattr(np, fn)(a, b)
+ np.testing.assert_allclose(got, expect)
+
+ def test_df_use_case(self):
+ df = DataFrame({'a': np.random.randn(10),
+ 'b': np.random.randn(10)})
+ df.eval("e = arctan2(sin(a), b)",
+ engine=self.engine,
+ parser=self.parser)
+ got = df.e
+ expect = np.arctan2(np.sin(df.a), df.b)
+ pd.util.testing.assert_almost_equal(got, expect)
+
+ def test_df_arithmetic_subexpression(self):
+ df = DataFrame({'a': np.random.randn(10),
+ 'b': np.random.randn(10)})
+ df.eval("e = sin(a + b)",
+ engine=self.engine,
+ parser=self.parser)
+ got = df.e
+ expect = np.sin(df.a + df.b)
+ pd.util.testing.assert_almost_equal(got, expect)
+
+ def check_result_type(self, dtype, expect_dtype):
+ df = DataFrame({'a': np.random.randn(10).astype(dtype)})
+ self.assertEqual(df.a.dtype, dtype)
+ df.eval("b = sin(a)",
+ engine=self.engine,
+ parser=self.parser)
+ got = df.b
+ expect = np.sin(df.a)
+ self.assertEqual(expect.dtype, got.dtype)
+ self.assertEqual(expect_dtype, got.dtype)
+ pd.util.testing.assert_almost_equal(got, expect)
+
+ def test_result_types(self):
+ self.check_result_type(np.int32, np.float64)
+ self.check_result_type(np.int64, np.float64)
+ self.check_result_type(np.float32, np.float32)
+ self.check_result_type(np.float64, np.float64)
+ # Did not test complex64 because DataFrame is converting it to
+ # complex128. Due to https://github.com/pydata/pandas/issues/10952
+ self.check_result_type(np.complex128, np.complex128)
+
+ def test_undefined_func(self):
+ df = DataFrame({'a': np.random.randn(10)})
+ with tm.assertRaisesRegexp(ValueError,
+ "\"mysin\" is not a supported function"):
+ df.eval("mysin(a)",
+ engine=self.engine,
+ parser=self.parser)
+
+ def test_keyword_arg(self):
+ df = DataFrame({'a': np.random.randn(10)})
+ with tm.assertRaisesRegexp(TypeError,
+ "Function \"sin\" does not support "
+ "keyword arguments"):
+ df.eval("sin(x=a)",
+ engine=self.engine,
+ parser=self.parser)
+
+
+class TestMathPythonPandas(TestMathPythonPython):
+ @classmethod
+ def setUpClass(cls):
+ super(TestMathPythonPandas, cls).setUpClass()
+ cls.engine = 'python'
+ cls.parser = 'pandas'
+
+
+class TestMathNumExprPandas(TestMathPythonPython):
+ @classmethod
+ def setUpClass(cls):
+ super(TestMathNumExprPandas, cls).setUpClass()
+ cls.engine = 'numexpr'
+ cls.parser = 'pandas'
+
+
+class TestMathNumExprPython(TestMathPythonPython):
+ @classmethod
+ def setUpClass(cls):
+ super(TestMathNumExprPython, cls).setUpClass()
+ cls.engine = 'numexpr'
+ cls.parser = 'python'
+
+
_var_s = randn(10)
| closes #4893
Extends the eval parser to accept calling math functions.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10953 | 2015-08-31T17:20:37Z | 2015-09-05T22:26:10Z | 2015-09-05T22:26:10Z | 2015-09-05T22:26:15Z |
DEPR: Deprecate legacy offsets | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 6f30ff3f51ad5..4394981abb8c3 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -656,7 +656,7 @@ apply the offset to each element.
rng + DateOffset(months=2)
s + DateOffset(months=2)
s - DateOffset(months=2)
-
+
If the offset class maps directly to a ``Timedelta`` (``Day``, ``Hour``,
``Minute``, ``Second``, ``Micro``, ``Milli``, ``Nano``) it can be
used exactly like a ``Timedelta`` - see the
@@ -670,7 +670,7 @@ used exactly like a ``Timedelta`` - see the
td + Minute(15)
Note that some offsets (such as ``BQuarterEnd``) do not have a
-vectorized implementation. They can still be used but may
+vectorized implementation. They can still be used but may
calculate signficantly slower and will raise a ``PerformanceWarning``
.. ipython:: python
@@ -882,10 +882,10 @@ frequencies. We will refer to these aliases as *offset aliases*
"BAS", "business year start frequency"
"BH", "business hour frequency"
"H", "hourly frequency"
- "T", "minutely frequency"
+ "T, min", "minutely frequency"
"S", "secondly frequency"
- "L", "milliseonds"
- "U", "microseconds"
+ "L, ms", "milliseonds"
+ "U, us", "microseconds"
"N", "nanoseconds"
Combining Aliases
@@ -953,11 +953,12 @@ These can be used as arguments to ``date_range``, ``bdate_range``, constructors
for ``DatetimeIndex``, as well as various other timeseries-related functions
in pandas.
+.. _timeseries.legacyaliases:
+
Legacy Aliases
~~~~~~~~~~~~~~
-Note that prior to v0.8.0, time rules had a slightly different look. pandas
-will continue to support the legacy time rules for the time being but it is
-strongly recommended that you switch to using the new offset aliases.
+Note that prior to v0.8.0, time rules had a slightly different look. These are
+deprecated in v0.17.0, and removed in future version.
.. csv-table::
:header: "Legacy Time Rule", "Offset Alias"
@@ -987,9 +988,7 @@ strongly recommended that you switch to using the new offset aliases.
"A\@OCT", "BA\-OCT"
"A\@NOV", "BA\-NOV"
"A\@DEC", "BA\-DEC"
- "min", "T"
- "ms", "L"
- "us", "U"
+
As you can see, legacy quarterly and annual frequencies are business quarters
and business year ends. Please also note the legacy time rule for milliseconds
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 9f2ec43cb2ae3..1d16136dd6b4d 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -653,6 +653,7 @@ Deprecations
``DataFrame.add(other, fill_value=0)`` and ``DataFrame.mul(other, fill_value=1.)``
(:issue:`10735`).
- ``TimeSeries`` deprecated in favor of ``Series`` (note that this has been alias since 0.13.0), (:issue:`10890`)
+- Legacy offsets (like ``'A@JAN'``) listed in :ref:`here <timeseries.legacyaliases>` are deprecated (note that this has been alias since 0.8.0), (:issue:`10878`)
- ``WidePanel`` deprecated in favor of ``Panel``, ``LongPanel`` in favor of ``DataFrame`` (note these have been aliases since < 0.11.0), (:issue:`10892`)
.. _whatsnew_0170.prior_deprecations:
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 85de5e083d6d9..7e5c3af43c861 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -2,6 +2,7 @@
from pandas.compat import range, long, zip
from pandas import compat
import re
+import warnings
import numpy as np
@@ -335,10 +336,8 @@ def get_period_alias(offset_str):
_rule_aliases = {
# Legacy rules that will continue to map to their original values
# essentially for the rest of time
-
'WEEKDAY': 'B',
'EOM': 'BM',
-
'W@MON': 'W-MON',
'W@TUE': 'W-TUE',
'W@WED': 'W-WED',
@@ -346,18 +345,9 @@ def get_period_alias(offset_str):
'W@FRI': 'W-FRI',
'W@SAT': 'W-SAT',
'W@SUN': 'W-SUN',
- 'W': 'W-SUN',
-
'Q@JAN': 'BQ-JAN',
'Q@FEB': 'BQ-FEB',
'Q@MAR': 'BQ-MAR',
- 'Q': 'Q-DEC',
-
- 'A': 'A-DEC', # YearEnd(month=12),
- 'AS': 'AS-JAN', # YearBegin(month=1),
- 'BA': 'BA-DEC', # BYearEnd(month=12),
- 'BAS': 'BAS-JAN', # BYearBegin(month=1),
-
'A@JAN': 'BA-JAN',
'A@FEB': 'BA-FEB',
'A@MAR': 'BA-MAR',
@@ -370,8 +360,17 @@ def get_period_alias(offset_str):
'A@OCT': 'BA-OCT',
'A@NOV': 'BA-NOV',
'A@DEC': 'BA-DEC',
+}
+
+_lite_rule_alias = {
+ 'W': 'W-SUN',
+ 'Q': 'Q-DEC',
+
+ 'A': 'A-DEC', # YearEnd(month=12),
+ 'AS': 'AS-JAN', # YearBegin(month=1),
+ 'BA': 'BA-DEC', # BYearEnd(month=12),
+ 'BAS': 'BAS-JAN', # BYearBegin(month=1),
- # lite aliases
'Min': 'T',
'min': 'T',
'ms': 'L',
@@ -386,6 +385,7 @@ def get_period_alias(offset_str):
# Note that _rule_aliases is not 1:1 (d[BA]==d[A@DEC]), and so traversal
# order matters when constructing an inverse. we pick one. #2331
+# Used in get_legacy_offset_name
_legacy_reverse_map = dict((v, k) for k, v in
reversed(sorted(compat.iteritems(_rule_aliases))))
@@ -501,6 +501,9 @@ def get_base_alias(freqstr):
_dont_uppercase = set(('MS', 'ms'))
+_LEGACY_FREQ_WARNING = 'Freq "{0}" is deprecated, use "{1}" as alternative.'
+
+
def get_offset(name):
"""
Return DateOffset object associated with rule name
@@ -513,12 +516,26 @@ def get_offset(name):
name = name.upper()
if name in _rule_aliases:
- name = _rule_aliases[name]
+ new = _rule_aliases[name]
+ warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
+ FutureWarning)
+ name = new
elif name.lower() in _rule_aliases:
- name = _rule_aliases[name.lower()]
+ new = _rule_aliases[name.lower()]
+ warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
+ FutureWarning)
+ name = new
+
+ name = _lite_rule_alias.get(name, name)
+ name = _lite_rule_alias.get(name.lower(), name)
+
else:
if name in _rule_aliases:
- name = _rule_aliases[name]
+ new = _rule_aliases[name]
+ warnings.warn(_LEGACY_FREQ_WARNING.format(name, new),
+ FutureWarning)
+ name = new
+ name = _lite_rule_alias.get(name, name)
if name not in _offset_map:
try:
@@ -561,6 +578,9 @@ def get_legacy_offset_name(offset):
"""
Return the pre pandas 0.8.0 name for the date offset
"""
+
+ # This only used in test_timeseries_legacy.py
+
name = offset.name
return _legacy_reverse_map.get(name, name)
@@ -754,10 +774,21 @@ def _period_alias_dictionary():
def _period_str_to_code(freqstr):
# hack
- freqstr = _rule_aliases.get(freqstr, freqstr)
+ if freqstr in _rule_aliases:
+ new = _rule_aliases[freqstr]
+ warnings.warn(_LEGACY_FREQ_WARNING.format(freqstr, new),
+ FutureWarning)
+ freqstr = new
+ freqstr = _lite_rule_alias.get(freqstr, freqstr)
if freqstr not in _dont_uppercase:
- freqstr = _rule_aliases.get(freqstr.lower(), freqstr)
+ lower = freqstr.lower()
+ if lower in _rule_aliases:
+ new = _rule_aliases[lower]
+ warnings.warn(_LEGACY_FREQ_WARNING.format(lower, new),
+ FutureWarning)
+ freqstr = new
+ freqstr = _lite_rule_alias.get(lower, freqstr)
try:
if freqstr not in _dont_uppercase:
@@ -766,6 +797,8 @@ def _period_str_to_code(freqstr):
except KeyError:
try:
alias = _period_alias_dict[freqstr]
+ warnings.warn(_LEGACY_FREQ_WARNING.format(freqstr, alias),
+ FutureWarning)
except KeyError:
raise ValueError("Unknown freqstr: %s" % freqstr)
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 33faac153cce0..ec416efe1079f 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -2650,14 +2650,13 @@ def generate_range(start=None, end=None, periods=None,
prefix_mapping['N'] = Nano
-
def _make_offset(key):
"""Gets offset based on key. KeyError if prefix is bad, ValueError if
suffix is bad. All handled by `get_offset` in tseries/frequencies. Not
public."""
if key is None:
return None
- split = key.replace('@', '-').split('-')
+ split = key.split('-')
klass = prefix_mapping[split[0]]
# handles case where there's no suffix (and will TypeError if too many '-')
obj = klass._from_name(*split[1:])
diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py
index 68b65697918f4..070363460f791 100644
--- a/pandas/tseries/tests/test_frequencies.py
+++ b/pandas/tseries/tests/test_frequencies.py
@@ -529,9 +529,13 @@ def test_series(self):
self.assertRaises(ValueError, lambda : frequencies.infer_freq(Series(['foo','bar'])))
# cannot infer on PeriodIndex
- for freq in [None, 'L', 'Y']:
+ for freq in [None, 'L']:
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
+ for freq in ['Y']:
+ with tm.assert_produces_warning(FutureWarning):
+ s = Series(period_range('2013',periods=10,freq=freq))
+ self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# DateTimeIndex
for freq in ['M', 'L', 'S']:
@@ -543,6 +547,19 @@ def test_series(self):
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,'D')
+ def test_legacy_offset_warnings(self):
+ for k, v in compat.iteritems(frequencies._rule_aliases):
+ with tm.assert_produces_warning(FutureWarning):
+ result = frequencies.get_offset(k)
+ exp = frequencies.get_offset(v)
+ self.assertEqual(result, exp)
+
+ with tm.assert_produces_warning(FutureWarning):
+ idx = date_range('2011-01-01', periods=5, freq=k)
+ exp = date_range('2011-01-01', periods=5, freq=v)
+ self.assert_index_equal(idx, exp)
+
+
MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC']
diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py
index d364206017c7e..b3ec88f4d0988 100644
--- a/pandas/tseries/tests/test_offsets.py
+++ b/pandas/tseries/tests/test_offsets.py
@@ -3615,7 +3615,6 @@ def test_get_offset():
('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)),
('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)),
('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4)),
- ('w@Sat', Week(weekday=5)),
("RE-N-DEC-MON", makeFY5253NearestEndMonth(weekday=0, startingMonth=12)),
("RE-L-DEC-TUE", makeFY5253LastOfMonth(weekday=1, startingMonth=12)),
("REQ-L-MAR-TUE-4", makeFY5253LastOfMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=4)),
@@ -3628,6 +3627,13 @@ def test_get_offset():
assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
(name, expected, offset))
+def test_get_offset_legacy():
+ pairs = [('w@Sat', Week(weekday=5))]
+ for name, expected in pairs:
+ with tm.assert_produces_warning(FutureWarning):
+ offset = get_offset(name)
+ assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
+ (name, expected, offset))
class TestParseTimeString(tm.TestCase):
@@ -3663,11 +3669,18 @@ def test_get_standard_freq():
assert fstr == get_standard_freq('w')
assert fstr == get_standard_freq('1w')
assert fstr == get_standard_freq(('W', 1))
- assert fstr == get_standard_freq('WeEk')
+
+ with tm.assert_produces_warning(FutureWarning):
+ result = get_standard_freq('WeEk')
+ assert fstr == result
fstr = get_standard_freq('5Q')
assert fstr == get_standard_freq('5q')
- assert fstr == get_standard_freq('5QuarTer')
+
+ with tm.assert_produces_warning(FutureWarning):
+ result = get_standard_freq('5QuarTer')
+ assert fstr == result
+
assert fstr == get_standard_freq(('q', 5))
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index dca37d9ce164c..cdd9d036fcadc 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -437,7 +437,7 @@ def test_properties_monthly(self):
def test_properties_weekly(self):
# Test properties on Periods with daily frequency.
- w_date = Period(freq='WK', year=2007, month=1, day=7)
+ w_date = Period(freq='W', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
@@ -445,7 +445,22 @@ def test_properties_weekly(self):
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
assert_equal(w_date.days_in_month, 31)
- assert_equal(Period(freq='WK', year=2012, month=2, day=1).days_in_month, 29)
+ assert_equal(Period(freq='W', year=2012, month=2, day=1).days_in_month, 29)
+
+ def test_properties_weekly_legacy(self):
+ # Test properties on Periods with daily frequency.
+ with tm.assert_produces_warning(FutureWarning):
+ w_date = Period(freq='WK', year=2007, month=1, day=7)
+ #
+ assert_equal(w_date.year, 2007)
+ assert_equal(w_date.quarter, 1)
+ assert_equal(w_date.month, 1)
+ assert_equal(w_date.week, 1)
+ assert_equal((w_date - 1).week, 52)
+ assert_equal(w_date.days_in_month, 31)
+ with tm.assert_produces_warning(FutureWarning):
+ exp = Period(freq='WK', year=2012, month=2, day=1)
+ assert_equal(exp.days_in_month, 29)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
@@ -613,8 +628,8 @@ def test_conv_annual(self):
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
- ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
- ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
+ ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
+ ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
@@ -643,8 +658,8 @@ def test_conv_annual(self):
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
- assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
- assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
+ assert_equal(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
+ assert_equal(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
@@ -681,8 +696,8 @@ def test_conv_quarterly(self):
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
- ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
- ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
+ ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
+ ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
@@ -711,8 +726,8 @@ def test_conv_quarterly(self):
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
- assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
- assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
+ assert_equal(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
+ assert_equal(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
@@ -739,8 +754,8 @@ def test_conv_monthly(self):
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
- ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
- ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
+ ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
+ ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
@@ -763,8 +778,8 @@ def test_conv_monthly(self):
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
- assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
- assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
+ assert_equal(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
+ assert_equal(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
@@ -781,15 +796,15 @@ def test_conv_monthly(self):
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
- ival_W = Period(freq='WK', year=2007, month=1, day=1)
+ ival_W = Period(freq='W', year=2007, month=1, day=1)
- ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
- ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
- ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
- ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
- ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
- ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
- ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
+ ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
+ ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
+ ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
+ ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
+ ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
+ ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
+ ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
@@ -806,9 +821,9 @@ def test_conv_weekly(self):
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
- ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
- ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
- ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
+ ival_W_end_of_year = Period(freq='W', year=2007, month=12, day=31)
+ ival_W_end_of_quarter = Period(freq='W', year=2007, month=3, day=31)
+ ival_W_end_of_month = Period(freq='W', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
@@ -885,7 +900,128 @@ def test_conv_weekly(self):
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
- assert_equal(ival_W.asfreq('WK'), ival_W)
+ assert_equal(ival_W.asfreq('W'), ival_W)
+
+ def test_conv_weekly_legacy(self):
+ # frequency conversion tests: from Weekly Frequency
+
+ with tm.assert_produces_warning(FutureWarning):
+ ival_W = Period(freq='WK', year=2007, month=1, day=1)
+
+ with tm.assert_produces_warning(FutureWarning):
+ ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
+ with tm.assert_produces_warning(FutureWarning):
+ ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
+ with tm.assert_produces_warning(FutureWarning):
+ ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
+ with tm.assert_produces_warning(FutureWarning):
+ ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
+ with tm.assert_produces_warning(FutureWarning):
+ ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
+ with tm.assert_produces_warning(FutureWarning):
+ ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
+ with tm.assert_produces_warning(FutureWarning):
+ ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
+
+ ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
+ ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
+ ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
+ ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
+ ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
+ ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
+ ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
+ ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
+ ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
+ ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
+ ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
+ ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
+ ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
+ ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
+
+ with tm.assert_produces_warning(FutureWarning):
+ ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
+ with tm.assert_produces_warning(FutureWarning):
+ ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
+ with tm.assert_produces_warning(FutureWarning):
+ ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
+ ival_W_to_A = Period(freq='A', year=2007)
+ ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
+ ival_W_to_M = Period(freq='M', year=2007, month=1)
+
+ if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
+ ival_W_to_A_end_of_year = Period(freq='A', year=2007)
+ else:
+ ival_W_to_A_end_of_year = Period(freq='A', year=2008)
+
+ if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
+ ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
+ quarter=1)
+ else:
+ ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
+ quarter=2)
+
+ if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
+ ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
+ else:
+ ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
+
+ ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
+ ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
+ ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
+ ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
+ ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
+ hour=0)
+ ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
+ hour=23)
+ ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
+ hour=0, minute=0)
+ ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
+ hour=23, minute=59)
+ ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
+ hour=0, minute=0, second=0)
+ ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
+ hour=23, minute=59, second=59)
+
+ assert_equal(ival_W.asfreq('A'), ival_W_to_A)
+ assert_equal(ival_W_end_of_year.asfreq('A'),
+ ival_W_to_A_end_of_year)
+ assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
+ assert_equal(ival_W_end_of_quarter.asfreq('Q'),
+ ival_W_to_Q_end_of_quarter)
+ assert_equal(ival_W.asfreq('M'), ival_W_to_M)
+ assert_equal(ival_W_end_of_month.asfreq('M'),
+ ival_W_to_M_end_of_month)
+
+ assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
+ assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
+
+ assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
+ assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
+
+ assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
+ assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
+ assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
+ assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
+ assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
+ assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
+ assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
+ assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
+ assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
+ assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
+ assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
+ assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
+ assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
+ assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
+
+ assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
+ assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
+ assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
+ assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
+ assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
+ assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
+
+ with tm.assert_produces_warning(FutureWarning):
+ assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
@@ -899,7 +1035,7 @@ def test_conv_business(self):
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
- ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7)
+ ival_B_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
@@ -920,8 +1056,8 @@ def test_conv_business(self):
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
- assert_equal(ival_B.asfreq('WK'), ival_B_to_W)
- assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W)
+ assert_equal(ival_B.asfreq('W'), ival_B_to_W)
+ assert_equal(ival_B_end_of_week.asfreq('W'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
@@ -962,7 +1098,7 @@ def test_conv_daily(self):
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
- ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7)
+ ival_D_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
@@ -993,8 +1129,8 @@ def test_conv_daily(self):
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
- assert_equal(ival_D.asfreq('WK'), ival_D_to_W)
- assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W)
+ assert_equal(ival_D.asfreq('W'), ival_D_to_W)
+ assert_equal(ival_D_end_of_week.asfreq('W'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
@@ -1031,7 +1167,7 @@ def test_conv_hourly(self):
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
- ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7)
+ ival_H_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
@@ -1050,8 +1186,8 @@ def test_conv_hourly(self):
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
- assert_equal(ival_H.asfreq('WK'), ival_H_to_W)
- assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W)
+ assert_equal(ival_H.asfreq('W'), ival_H_to_W)
+ assert_equal(ival_H_end_of_week.asfreq('W'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
@@ -1087,7 +1223,7 @@ def test_conv_minutely(self):
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
- ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7)
+ ival_T_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
@@ -1103,8 +1239,8 @@ def test_conv_minutely(self):
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
- assert_equal(ival_T.asfreq('WK'), ival_T_to_W)
- assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W)
+ assert_equal(ival_T.asfreq('W'), ival_T_to_W)
+ assert_equal(ival_T_end_of_week.asfreq('W'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
@@ -1142,7 +1278,7 @@ def test_conv_secondly(self):
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
- ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7)
+ ival_S_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
@@ -1156,8 +1292,8 @@ def test_conv_secondly(self):
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
- assert_equal(ival_S.asfreq('WK'), ival_S_to_W)
- assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W)
+ assert_equal(ival_S.asfreq('W'), ival_S_to_W)
+ assert_equal(ival_S_end_of_week.asfreq('W'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
@@ -2171,12 +2307,17 @@ def test_to_period_annualish(self):
self.assertEqual(prng.freq, 'A-DEC')
def test_to_period_monthish(self):
- offsets = ['MS', 'EOM', 'BM']
+ offsets = ['MS', 'BM']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
+ with tm.assert_produces_warning(FutureWarning):
+ rng = date_range('01-Jan-2012', periods=8, freq='EOM')
+ prng = rng.to_period()
+ self.assertEqual(prng.freq, 'M')
+
def test_no_multiples(self):
self.assertRaises(ValueError, period_range, '1989Q3', periods=10,
freq='2Q')
diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py
index 39736eef79295..08a4056c1fce2 100644
--- a/pandas/tseries/tests/test_plotting.py
+++ b/pandas/tseries/tests/test_plotting.py
@@ -22,7 +22,7 @@
@tm.mplskip
class TestTSPlot(tm.TestCase):
def setUp(self):
- freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'Y']
+ freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']
idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
| Closes #10878.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10951 | 2015-08-31T14:29:06Z | 2015-09-01T11:05:56Z | 2015-09-01T11:05:56Z | 2015-09-01T12:22:24Z |
BUG: Index name lost in conv #10875 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 3e81a923a114c..834514b603a80 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -770,7 +770,7 @@ Bug Fixes
- Bug in ``filter`` (regression from 0.16.0) and ``transform`` when grouping on multiple keys, one of which is datetime-like (:issue:`10114`)
-
+- Bug in ``to_datetime`` and ``to_timedelta`` causing ``Index`` name to be lost (:issue:`10875`)
- Bug that caused segfault when resampling an empty Series (:issue:`10228`)
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index b54e129c7a4e1..17aa6c30cd185 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -1732,6 +1732,11 @@ def test_equals_op_multiindex(self):
df.index == index_a
tm.assert_numpy_array_equal(index_a == mi3, np.array([False, False, False]))
+ def test_conversion_preserves_name(self):
+ #GH 10875
+ i = pd.Index(['01:02:03', '01:02:04'], name='label')
+ self.assertEqual(i.name, pd.to_datetime(i).name)
+ self.assertEqual(i.name, pd.to_timedelta(i).name)
class TestCategoricalIndex(Base, tm.TestCase):
_holder = CategoricalIndex
diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py
index 886d6ff42ced6..282e1d603ed84 100644
--- a/pandas/tseries/timedeltas.py
+++ b/pandas/tseries/timedeltas.py
@@ -8,7 +8,7 @@
from pandas import compat
from pandas.core.common import (ABCSeries, is_integer_dtype,
is_timedelta64_dtype, is_list_like,
- isnull, _ensure_object)
+ isnull, _ensure_object, ABCIndexClass)
from pandas.util.decorators import deprecate_kwarg
@deprecate_kwarg(old_arg_name='coerce', new_arg_name='errors',
@@ -35,7 +35,7 @@ def to_timedelta(arg, unit='ns', box=True, errors='raise', coerce=None):
"""
unit = _validate_timedelta_unit(unit)
- def _convert_listlike(arg, box, unit):
+ def _convert_listlike(arg, box, unit, name=None):
if isinstance(arg, (list,tuple)) or ((hasattr(arg,'__iter__') and not hasattr(arg,'dtype'))):
arg = np.array(list(arg), dtype='O')
@@ -51,7 +51,7 @@ def _convert_listlike(arg, box, unit):
if box:
from pandas import TimedeltaIndex
- value = TimedeltaIndex(value,unit='ns')
+ value = TimedeltaIndex(value,unit='ns', name=name)
return value
if arg is None:
@@ -60,6 +60,8 @@ def _convert_listlike(arg, box, unit):
from pandas import Series
values = _convert_listlike(arg.values, box=False, unit=unit)
return Series(values, index=arg.index, name=arg.name, dtype='m8[ns]')
+ elif isinstance(arg, ABCIndexClass):
+ return _convert_listlike(arg, box=box, unit=unit, name=arg.name)
elif is_list_like(arg):
return _convert_listlike(arg, box=box, unit=unit)
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 6f08448b47b1e..efd1ff9ba34fd 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -8,6 +8,7 @@
import pandas.tslib as tslib
import pandas.core.common as com
from pandas.compat import StringIO, callable
+from pandas.core.common import ABCIndexClass
import pandas.compat as compat
from pandas.util.decorators import deprecate_kwarg
@@ -277,7 +278,7 @@ def _to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
from pandas.core.series import Series
from pandas.tseries.index import DatetimeIndex
- def _convert_listlike(arg, box, format):
+ def _convert_listlike(arg, box, format, name=None):
if isinstance(arg, (list,tuple)):
arg = np.array(arg, dtype='O')
@@ -286,7 +287,7 @@ def _convert_listlike(arg, box, format):
if com.is_datetime64_ns_dtype(arg):
if box and not isinstance(arg, DatetimeIndex):
try:
- return DatetimeIndex(arg, tz='utc' if utc else None)
+ return DatetimeIndex(arg, tz='utc' if utc else None, name=name)
except ValueError:
pass
@@ -294,7 +295,7 @@ def _convert_listlike(arg, box, format):
elif format is None and com.is_integer_dtype(arg) and unit=='ns':
result = arg.astype('datetime64[ns]')
if box:
- return DatetimeIndex(result, tz='utc' if utc else None)
+ return DatetimeIndex(result, tz='utc' if utc else None, name=name)
return result
@@ -355,13 +356,13 @@ def _convert_listlike(arg, box, format):
require_iso8601=require_iso8601)
if com.is_datetime64_dtype(result) and box:
- result = DatetimeIndex(result, tz='utc' if utc else None)
+ result = DatetimeIndex(result, tz='utc' if utc else None, name=name)
return result
except ValueError as e:
try:
values, tz = tslib.datetime_to_datetime64(arg)
- return DatetimeIndex._simple_new(values, None, tz=tz)
+ return DatetimeIndex._simple_new(values, name=name, tz=tz)
except (ValueError, TypeError):
raise e
@@ -372,6 +373,8 @@ def _convert_listlike(arg, box, format):
elif isinstance(arg, Series):
values = _convert_listlike(arg.values, False, format)
return Series(values, index=arg.index, name=arg.name)
+ elif isinstance(arg, ABCIndexClass):
+ return _convert_listlike(arg, box, format, name=arg.name)
elif com.is_list_like(arg):
return _convert_listlike(arg, box, format)
| Addresses #10875, when `Index` is converted via `to_datetime`, `to_timedelta`
part of #9862 master issue
| https://api.github.com/repos/pandas-dev/pandas/pulls/10945 | 2015-08-30T22:43:26Z | 2015-08-31T12:18:36Z | 2015-08-31T12:18:36Z | 2015-08-31T23:37:24Z |
BUG: passing columns and dict with scalar values should raise error | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index e9d39e0441055..70d70a2b76a81 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -836,3 +836,4 @@ Bug Fixes
- Bug in ``to_json`` which was causing segmentation fault when serializing 0-rank ndarray (:issue:`9576`)
- Bug in plotting functions may raise ``IndexError`` when plotted on ``GridSpec`` (:issue:`10819`)
- Bug in plot result may show unnecessary minor ticklabels (:issue:`10657`)
+- Bug when constructing ``DataFrame`` where passing a dictionary with only scalar values and specifying columns did not raise an error (:issue:`10856`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 997dfeb728ade..acf5e69bf05e3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -298,13 +298,18 @@ def _init_dict(self, data, index, columns, dtype=None):
if columns is not None:
columns = _ensure_index(columns)
- # prefilter if columns passed
+ # GH10856
+ # raise ValueError if only scalars in dict
+ if index is None:
+ extract_index(list(data.values()))
+ # prefilter if columns passed
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
if index is None:
index = extract_index(list(data.values()))
+
else:
index = _ensure_index(index)
@@ -330,6 +335,7 @@ def _init_dict(self, data, index, columns, dtype=None):
v = data[k]
data_names.append(k)
arrays.append(v)
+
else:
keys = list(data.keys())
if not isinstance(data, OrderedDict):
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index d7b5a9811bc5b..9bdb7f08fe7cf 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -2762,6 +2762,17 @@ def test_constructor_dict(self):
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
self.assertTrue(frame.index.equals(Index([])))
+ # GH10856
+ # dict with scalar values should raise error, even if columns passed
+ with tm.assertRaises(ValueError):
+ DataFrame({'a': 0.7})
+
+ with tm.assertRaises(ValueError):
+ DataFrame({'a': 0.7}, columns=['a'])
+
+ with tm.assertRaises(ValueError):
+ DataFrame({'a': 0.7}, columns=['b'])
+
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
| Fixes [GH10856](https://github.com/pydata/pandas/issues/10856).
``` Python
>>> pd.DataFrame({'a':0.1}, columns=['b'])
ValueError: If using all scalar values, you must pass an index
```
Trying to raise this error was slightly trickier than I anticipated - this was the only way that didn't break existing tests. If no index is passed to the constructor, `extract_index` is called to check whether the dictionary contacts only scalar values (and raises the ValueError if so).
This check now happens _prior_ to preselecting any columns. If people are happy with this approach I can write tests for the PR.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10943 | 2015-08-30T14:41:37Z | 2015-09-02T11:51:18Z | 2015-09-02T11:51:18Z | 2015-09-02T20:14:50Z |
BUG: fixing bug in groupby_indices benchmark | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index a84a5373651bb..f1ac09b8b2516 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -212,7 +212,7 @@ class groupby_indices(object):
def setup(self):
try:
self.rng = date_range('1/1/2000', '12/31/2005', freq='H')
- (year, month, day) = (self.rng.year, self.rng.month, self.rng.day)
+ (self.year, self.month, self.day) = (self.rng.year, self.rng.month, self.rng.day)
except:
self.rng = date_range('1/1/2000', '12/31/2000', offset=datetools.Hour())
self.year = self.rng.map((lambda x: x.year))
@@ -1690,4 +1690,4 @@ def setup(self):
self.s = Series(np.tile(self.uniques, (self.N // self.K)))
def time_series_value_counts_strings(self):
- self.s.value_counts()
\ No newline at end of file
+ self.s.value_counts()
| https://api.github.com/repos/pandas-dev/pandas/pulls/10942 | 2015-08-30T14:15:09Z | 2015-08-31T00:56:45Z | 2015-08-31T00:56:45Z | 2015-08-31T00:56:49Z | |
Edit DOC: consistent imports (GH9886) part V | diff --git a/doc/source/merging.rst b/doc/source/merging.rst
index d51c2f62b8a0c..c62647010a131 100644
--- a/doc/source/merging.rst
+++ b/doc/source/merging.rst
@@ -6,9 +6,8 @@
import numpy as np
np.random.seed(123456)
- from numpy import nan
- from pandas import *
- options.display.max_rows=15
+ import pandas as pd
+ pd.options.display.max_rows=15
randn = np.random.randn
np.set_printoptions(precision=4, suppress=True)
@@ -43,26 +42,26 @@ a simple example:
.. ipython:: python
- df1 = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
- 'B': ['B0', 'B1', 'B2', 'B3'],
- 'C': ['C0', 'C1', 'C2', 'C3'],
- 'D': ['D0', 'D1', 'D2', 'D3']},
- index=[0, 1, 2, 3])
+ df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
+ 'B': ['B0', 'B1', 'B2', 'B3'],
+ 'C': ['C0', 'C1', 'C2', 'C3'],
+ 'D': ['D0', 'D1', 'D2', 'D3']},
+ index=[0, 1, 2, 3])
- df2 = DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
- 'B': ['B4', 'B5', 'B6', 'B7'],
- 'C': ['C4', 'C5', 'C6', 'C7'],
- 'D': ['D4', 'D5', 'D6', 'D7']},
- index=[4, 5, 6, 7])
+ df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
+ 'B': ['B4', 'B5', 'B6', 'B7'],
+ 'C': ['C4', 'C5', 'C6', 'C7'],
+ 'D': ['D4', 'D5', 'D6', 'D7']},
+ index=[4, 5, 6, 7])
- df3 = DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
- 'B': ['B8', 'B9', 'B10', 'B11'],
- 'C': ['C8', 'C9', 'C10', 'C11'],
- 'D': ['D8', 'D9', 'D10', 'D11']},
- index=[8, 9, 10, 11])
+ df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
+ 'B': ['B8', 'B9', 'B10', 'B11'],
+ 'C': ['C8', 'C9', 'C10', 'C11'],
+ 'D': ['D8', 'D9', 'D10', 'D11']},
+ index=[8, 9, 10, 11])
frames = [df1, df2, df3]
- result = concat(frames)
+ result = pd.concat(frames)
.. ipython:: python
:suppress:
@@ -78,7 +77,7 @@ some configurable handling of "what to do with the other axes":
::
- concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
+ pd.concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False)
- ``objs``: list or dict of Series, DataFrame, or Panel objects. If a dict is
@@ -112,7 +111,7 @@ this using the ``keys`` argument:
.. ipython:: python
- result = concat(frames, keys=['x', 'y', 'z'])
+ result = pd.concat(frames, keys=['x', 'y', 'z'])
.. ipython:: python
:suppress:
@@ -163,11 +162,11 @@ behavior:
.. ipython:: python
- df4 = DataFrame({'B': ['B2', 'B3', 'B6', 'B7'],
+ df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'],
'D': ['D2', 'D3', 'D6', 'D7'],
'F': ['F2', 'F3', 'F6', 'F7']},
index=[2, 3, 6, 7])
- result = concat([df1, df4], axis=1)
+ result = pd.concat([df1, df4], axis=1)
.. ipython:: python
@@ -183,7 +182,7 @@ with ``join='inner'``:
.. ipython:: python
- result = concat([df1, df4], axis=1, join='inner')
+ result = pd.concat([df1, df4], axis=1, join='inner')
.. ipython:: python
:suppress:
@@ -198,7 +197,7 @@ DataFrame:
.. ipython:: python
- result = concat([df1, df4], axis=1, join_axes=[df1.index])
+ result = pd.concat([df1, df4], axis=1, join_axes=[df1.index])
.. ipython:: python
:suppress:
@@ -275,7 +274,7 @@ To do this, use the ``ignore_index`` argument:
.. ipython:: python
- result = concat([df1, df4], ignore_index=True)
+ result = pd.concat([df1, df4], ignore_index=True)
.. ipython:: python
:suppress:
@@ -310,8 +309,8 @@ the name of the Series.
.. ipython:: python
- s1 = Series(['X0', 'X1', 'X2', 'X3'], name='X')
- result = concat([df1, s1], axis=1)
+ s1 = pd.Series(['X0', 'X1', 'X2', 'X3'], name='X')
+ result = pd.concat([df1, s1], axis=1)
.. ipython:: python
:suppress:
@@ -325,8 +324,8 @@ If unnamed Series are passed they will be numbered consecutively.
.. ipython:: python
- s2 = Series(['_0', '_1', '_2', '_3'])
- result = concat([df1, s2, s2, s2], axis=1)
+ s2 = pd.Series(['_0', '_1', '_2', '_3'])
+ result = pd.concat([df1, s2, s2, s2], axis=1)
.. ipython:: python
:suppress:
@@ -340,7 +339,7 @@ Passing ``ignore_index=True`` will drop all name references.
.. ipython:: python
- result = concat([df1, s1], axis=1, ignore_index=True)
+ result = pd.concat([df1, s1], axis=1, ignore_index=True)
.. ipython:: python
:suppress:
@@ -357,7 +356,7 @@ Let's consider a variation on the first example presented:
.. ipython:: python
- result = concat(frames, keys=['x', 'y', 'z'])
+ result = pd.concat(frames, keys=['x', 'y', 'z'])
.. ipython:: python
:suppress:
@@ -373,7 +372,7 @@ for the ``keys`` argument (unless other keys are specified):
.. ipython:: python
pieces = {'x': df1, 'y': df2, 'z': df3}
- result = concat(pieces)
+ result = pd.concat(pieces)
.. ipython:: python
:suppress:
@@ -385,7 +384,7 @@ for the ``keys`` argument (unless other keys are specified):
.. ipython:: python
- result = concat(pieces, keys=['z', 'y'])
+ result = pd.concat(pieces, keys=['z', 'y'])
.. ipython:: python
:suppress:
@@ -407,7 +406,7 @@ do so using the ``levels`` argument:
.. ipython:: python
- result = concat(pieces, keys=['x', 'y', 'z'],
+ result = pd.concat(pieces, keys=['x', 'y', 'z'],
levels=[['z', 'y', 'x', 'w']],
names=['group_key'])
@@ -437,7 +436,7 @@ which returns a new DataFrame as above.
.. ipython:: python
- s2 = Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D'])
+ s2 = pd.Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D'])
result = df1.append(s2, ignore_index=True)
.. ipython:: python
@@ -464,7 +463,7 @@ You can also pass a list of dicts or Series:
:suppress:
@savefig merging_append_dits.png
- p.plot([df1, DataFrame(dicts)], result,
+ p.plot([df1, pd.DataFrame(dicts)], result,
labels=['df1', 'dicts'], vertical=True);
plt.close('all');
@@ -490,9 +489,9 @@ standard database join operations between DataFrame objects:
::
- merge(left, right, how='inner', on=None, left_on=None, right_on=None,
- left_index=False, right_index=False, sort=True,
- suffixes=('_x', '_y'), copy=True)
+ pd.merge(left, right, how='inner', on=None, left_on=None, right_on=None,
+ left_index=False, right_index=False, sort=True,
+ suffixes=('_x', '_y'), copy=True)
Here's a description of what each argument is for:
@@ -566,14 +565,14 @@ key combination:
.. ipython:: python
- left = DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
- 'A': ['A0', 'A1', 'A2', 'A3'],
- 'B': ['B0', 'B1', 'B2', 'B3']})
+ left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
+ 'A': ['A0', 'A1', 'A2', 'A3'],
+ 'B': ['B0', 'B1', 'B2', 'B3']})
- right = DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
- 'C': ['C0', 'C1', 'C2', 'C3'],
- 'D': ['D0', 'D1', 'D2', 'D3']})
- result = merge(left, right, on='key')
+ right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
+ 'C': ['C0', 'C1', 'C2', 'C3'],
+ 'D': ['D0', 'D1', 'D2', 'D3']})
+ result = pd.merge(left, right, on='key')
.. ipython:: python
:suppress:
@@ -587,17 +586,17 @@ Here is a more complicated example with multiple join keys:
.. ipython:: python
- left = DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
- 'key2': ['K0', 'K1', 'K0', 'K1'],
- 'A': ['A0', 'A1', 'A2', 'A3'],
- 'B': ['B0', 'B1', 'B2', 'B3']})
+ left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
+ 'key2': ['K0', 'K1', 'K0', 'K1'],
+ 'A': ['A0', 'A1', 'A2', 'A3'],
+ 'B': ['B0', 'B1', 'B2', 'B3']})
- right = DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
- 'key2': ['K0', 'K0', 'K0', 'K0'],
- 'C': ['C0', 'C1', 'C2', 'C3'],
- 'D': ['D0', 'D1', 'D2', 'D3']})
+ right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
+ 'key2': ['K0', 'K0', 'K0', 'K0'],
+ 'C': ['C0', 'C1', 'C2', 'C3'],
+ 'D': ['D0', 'D1', 'D2', 'D3']})
- result = merge(left, right, on=['key1', 'key2'])
+ result = pd.merge(left, right, on=['key1', 'key2'])
.. ipython:: python
:suppress:
@@ -623,7 +622,7 @@ either the left or right tables, the values in the joined table will be
.. ipython:: python
- result = merge(left, right, how='left', on=['key1', 'key2'])
+ result = pd.merge(left, right, how='left', on=['key1', 'key2'])
.. ipython:: python
:suppress:
@@ -635,7 +634,7 @@ either the left or right tables, the values in the joined table will be
.. ipython:: python
- result = merge(left, right, how='right', on=['key1', 'key2'])
+ result = pd.merge(left, right, how='right', on=['key1', 'key2'])
.. ipython:: python
:suppress:
@@ -646,7 +645,7 @@ either the left or right tables, the values in the joined table will be
.. ipython:: python
- result = merge(left, right, how='outer', on=['key1', 'key2'])
+ result = pd.merge(left, right, how='outer', on=['key1', 'key2'])
.. ipython:: python
:suppress:
@@ -658,7 +657,7 @@ either the left or right tables, the values in the joined table will be
.. ipython:: python
- result = merge(left, right, how='inner', on=['key1', 'key2'])
+ result = pd.merge(left, right, how='inner', on=['key1', 'key2'])
.. ipython:: python
:suppress:
@@ -679,13 +678,13 @@ is a very basic example:
.. ipython:: python
- left = DataFrame({'A': ['A0', 'A1', 'A2'],
- 'B': ['B0', 'B1', 'B2']},
- index=['K0', 'K1', 'K2'])
+ left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
+ 'B': ['B0', 'B1', 'B2']},
+ index=['K0', 'K1', 'K2'])
- right = DataFrame({'C': ['C0', 'C2', 'C3'],
- 'D': ['D0', 'D2', 'D3']},
- index=['K0', 'K2', 'K3'])
+ right = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
+ 'D': ['D0', 'D2', 'D3']},
+ index=['K0', 'K2', 'K3'])
result = left.join(right)
@@ -727,7 +726,7 @@ indexes:
.. ipython:: python
- result = merge(left, right, left_index=True, right_index=True, how='outer')
+ result = pd.merge(left, right, left_index=True, right_index=True, how='outer')
.. ipython:: python
:suppress:
@@ -739,7 +738,7 @@ indexes:
.. ipython:: python
- result = merge(left, right, left_index=True, right_index=True, how='inner');
+ result = pd.merge(left, right, left_index=True, right_index=True, how='inner');
.. ipython:: python
:suppress:
@@ -760,7 +759,7 @@ equivalent:
::
left.join(right, on=key_or_keys)
- merge(left, right, left_on=key_or_keys, right_index=True,
+ pd.merge(left, right, left_on=key_or_keys, right_index=True,
how='left', sort=False)
Obviously you can choose whichever form you find more convenient. For
@@ -769,13 +768,13 @@ key), using ``join`` may be more convenient. Here is a simple example:
.. ipython:: python
- left = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
- 'B': ['B0', 'B1', 'B2', 'B3'],
- 'key': ['K0', 'K1', 'K0', 'K1']})
+ left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
+ 'B': ['B0', 'B1', 'B2', 'B3'],
+ 'key': ['K0', 'K1', 'K0', 'K1']})
- right = DataFrame({'C': ['C0', 'C1'],
- 'D': ['D0', 'D1']},
- index=['K0', 'K1'])
+ right = pd.DataFrame({'C': ['C0', 'C1'],
+ 'D': ['D0', 'D1']},
+ index=['K0', 'K1'])
result = left.join(right, on='key')
@@ -789,8 +788,8 @@ key), using ``join`` may be more convenient. Here is a simple example:
.. ipython:: python
- result = merge(left, right, left_on='key', right_index=True,
- how='left', sort=False);
+ result = pd.merge(left, right, left_on='key', right_index=True,
+ how='left', sort=False);
.. ipython:: python
:suppress:
@@ -806,14 +805,14 @@ To join on multiple keys, the passed DataFrame must have a ``MultiIndex``:
.. ipython:: python
- left = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
- 'B': ['B0', 'B1', 'B2', 'B3'],
- 'key1': ['K0', 'K0', 'K1', 'K2'],
- 'key2': ['K0', 'K1', 'K0', 'K1']})
+ left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
+ 'B': ['B0', 'B1', 'B2', 'B3'],
+ 'key1': ['K0', 'K0', 'K1', 'K2'],
+ 'key2': ['K0', 'K1', 'K0', 'K1']})
- index = MultiIndex.from_tuples([('K0', 'K0'), ('K1', 'K0'),
- ('K2', 'K0'), ('K2', 'K1')])
- right = DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
+ index = pd.MultiIndex.from_tuples([('K0', 'K0'), ('K1', 'K0'),
+ ('K2', 'K0'), ('K2', 'K1')])
+ right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=index)
@@ -865,16 +864,16 @@ a level name of the multi-indexed frame.
.. ipython:: python
- left = DataFrame({'A': ['A0', 'A1', 'A2'],
- 'B': ['B0', 'B1', 'B2']},
- index=Index(['K0', 'K1', 'K2'], name='key'))
+ left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
+ 'B': ['B0', 'B1', 'B2']},
+ index=Index(['K0', 'K1', 'K2'], name='key'))
- index = MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'),
- ('K2', 'Y2'), ('K2', 'Y3')],
- names=['key', 'Y'])
- right = DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
- 'D': ['D0', 'D1', 'D2', 'D3']},
- index=index)
+ index = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'),
+ ('K2', 'Y2'), ('K2', 'Y3')],
+ names=['key', 'Y'])
+ right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
+ 'D': ['D0', 'D1', 'D2', 'D3']},
+ index=index)
result = left.join(right, how='inner')
@@ -890,7 +889,7 @@ This is equivalent but less verbose and more memory efficient / faster than this
.. ipython:: python
- result = merge(left.reset_index(), right.reset_index(),
+ result = pd.merge(left.reset_index(), right.reset_index(),
on=['key'], how='inner').set_index(['key','Y'])
.. ipython:: python
@@ -908,15 +907,15 @@ This is not Implemented via ``join`` at-the-moment, however it can be done using
.. ipython:: python
- index = MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'),
- ('K1', 'X2')],
- names=['key', 'X'])
- left = DataFrame({'A': ['A0', 'A1', 'A2'],
- 'B': ['B0', 'B1', 'B2']},
- index=index)
+ index = pd.MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'),
+ ('K1', 'X2')],
+ names=['key', 'X'])
+ left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
+ 'B': ['B0', 'B1', 'B2']},
+ index=index)
- result = merge(left.reset_index(), right.reset_index(),
- on=['key'], how='inner').set_index(['key','X','Y'])
+ result = pd.merge(left.reset_index(), right.reset_index(),
+ on=['key'], how='inner').set_index(['key','X','Y'])
.. ipython:: python
:suppress:
@@ -935,10 +934,10 @@ columns:
.. ipython:: python
- left = DataFrame({'k': ['K0', 'K1', 'K2'], 'v': [1, 2, 3]})
- right = DataFrame({'k': ['K0', 'K0', 'K3'], 'v': [4, 5, 6]})
+ left = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'v': [1, 2, 3]})
+ right = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'v': [4, 5, 6]})
- result = merge(left, right, on='k')
+ result = pd.merge(left, right, on='k')
.. ipython:: python
:suppress:
@@ -950,7 +949,7 @@ columns:
.. ipython:: python
- result = merge(left, right, on='k', suffixes=['_l', '_r'])
+ result = pd.merge(left, right, on='k', suffixes=['_l', '_r'])
.. ipython:: python
:suppress:
@@ -987,7 +986,7 @@ them together on their indexes. The same is true for ``Panel.join``.
.. ipython:: python
- right2 = DataFrame({'v': [7, 8, 9]}, index=['K1', 'K1', 'K2'])
+ right2 = pd.DataFrame({'v': [7, 8, 9]}, index=['K1', 'K1', 'K2'])
result = left.join([right, right2])
.. ipython:: python
@@ -1037,10 +1036,10 @@ object from values for matching indices in the other. Here is an example:
.. ipython:: python
- df1 = DataFrame([[nan, 3., 5.], [-4.6, np.nan, nan],
- [nan, 7., nan]])
- df2 = DataFrame([[-42.6, np.nan, -8.2], [-5., 1.6, 4]],
- index=[1, 2])
+ df1 = pd.DataFrame([[np.nan, 3., 5.], [-4.6, np.nan, np.nan],
+ [np.nan, 7., np.nan]])
+ df2 = pd.DataFrame([[-42.6, np.nan, -8.2], [-5., 1.6, 4]],
+ index=[1, 2])
For this, use the ``combine_first`` method:
@@ -1075,4 +1074,4 @@ values inplace:
@savefig merging_update.png
p.plot([df1_copy, df2], df1,
labels=['df1', 'df2'], vertical=False);
- plt.close('all');
+ plt.close('all');
\ No newline at end of file
| Updated - but no idea why this is such a messed up branch.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10941 | 2015-08-30T14:07:24Z | 2015-09-01T09:17:33Z | 2015-09-01T09:17:33Z | 2015-09-01T09:18:15Z |
ENH: Add Series.dt.total_seconds GH #10817 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 3e81a923a114c..b7feec3895f97 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -176,6 +176,10 @@ Other enhancements
- ``pandas.tseries.offsets`` larger than the ``Day`` offset can now be used with with ``Series`` for addition/subtraction (:issue:`10699`). See the :ref:`Documentation <timeseries.offsetseries>` for more details.
+- ``pd.Series`` of type ``timedelta64`` has new method ``.dt.total_seconds()`` returning the duration of the timedelta in seconds (:issue: `10817`)
+
+- ``pd.Timedelta.total_seconds()`` now returns Timedelta duration to ns precision (previously microsecond precision) (:issue: `10939`)
+
- ``.as_blocks`` will now take a ``copy`` optional argument to return a copy of the data, default is to copy (no change in behavior from prior versions), (:issue:`9607`)
- ``regex`` argument to ``DataFrame.filter`` now handles numeric column names instead of raising ``ValueError`` (:issue:`10384`).
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 34ea674fe10c0..86eafdf7ca2c8 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -89,7 +89,7 @@ def test_dt_namespace_accessor(self):
'is_quarter_end', 'is_year_start', 'is_year_end', 'tz']
ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert', 'normalize', 'strftime']
ok_for_td = ['days','seconds','microseconds','nanoseconds']
- ok_for_td_methods = ['components','to_pytimedelta']
+ ok_for_td_methods = ['components','to_pytimedelta','total_seconds']
def get_expected(s, name):
result = getattr(Index(s.values),prop)
@@ -157,6 +157,10 @@ def compare(s, name):
result = s.dt.to_pytimedelta()
self.assertIsInstance(result,np.ndarray)
self.assertTrue(result.dtype == object)
+
+ result = s.dt.total_seconds()
+ self.assertIsInstance(result,pd.Series)
+ self.assertTrue(result.dtype == 'float64')
freq_result = s.dt.freq
self.assertEqual(freq_result, TimedeltaIndex(s.values, freq='infer').freq)
diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py
index a4d5939d386ae..9a282bec2e9e4 100644
--- a/pandas/tseries/common.py
+++ b/pandas/tseries/common.py
@@ -161,7 +161,7 @@ def components(self):
accessors=TimedeltaIndex._datetimelike_ops,
typ='property')
TimedeltaProperties._add_delegate_accessors(delegate=TimedeltaIndex,
- accessors=["to_pytimedelta"],
+ accessors=["to_pytimedelta", "total_seconds"],
typ='method')
class PeriodProperties(Properties):
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index b0c9d8852f8c9..984f2a1cec706 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -391,6 +391,10 @@ def f(x):
result = result.astype('int64')
return result
+ def total_seconds(self):
+ """ Total duration of each element expressed in seconds. """
+ return self._maybe_mask_results(1e-9*self.asi8)
+
def to_pytimedelta(self):
"""
Return TimedeltaIndex as object ndarray of datetime.timedelta objects
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 4870fbd55f33e..eef0894bdd349 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -19,6 +19,7 @@
assert_almost_equal,
assert_index_equal,
ensure_clean)
+from numpy.testing import assert_allclose
from pandas.tseries.offsets import Day, Second, Hour
import pandas.util.testing as tm
from numpy.random import rand, randn
@@ -945,6 +946,36 @@ def test_fields(self):
tm.assert_series_equal(s.dt.days,Series([1,np.nan],index=[0,1]))
tm.assert_series_equal(s.dt.seconds,Series([10*3600+11*60+12,np.nan],index=[0,1]))
+ def test_total_seconds(self):
+ # GH 10939
+ # test index
+ rng = timedelta_range('1 days, 10:11:12.100123456', periods=2, freq='s')
+ expt = [1*86400+10*3600+11*60+12+100123456./1e9,1*86400+10*3600+11*60+13+100123456./1e9]
+ assert_allclose(rng.total_seconds(), expt, atol=1e-10, rtol=0)
+
+ # test Series
+ s = Series(rng)
+ s_expt = Series(expt,index=[0,1])
+ tm.assert_series_equal(s.dt.total_seconds(),s_expt)
+
+ # with nat
+ s[1] = np.nan
+ s_expt = Series([1*86400+10*3600+11*60+12+100123456./1e9,np.nan],index=[0,1])
+ tm.assert_series_equal(s.dt.total_seconds(),s_expt)
+
+ # with both nat
+ s = Series([np.nan,np.nan], dtype='timedelta64[ns]')
+ tm.assert_series_equal(s.dt.total_seconds(),Series([np.nan,np.nan],index=[0,1]))
+
+ def test_total_seconds_scalar(self):
+ # GH 10939
+ rng = Timedelta('1 days, 10:11:12.100123456')
+ expt = 1*86400+10*3600+11*60+12+100123456./1e9
+ assert_allclose(rng.total_seconds(), expt, atol=1e-10, rtol=0)
+
+ rng = Timedelta(np.nan)
+ self.assertTrue(np.isnan(rng.total_seconds()))
+
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 369993b4c54d1..226cfc843b3cf 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -642,6 +642,10 @@ class NaTType(_NaT):
def __reduce__(self):
return (__nat_unpickle, (None, ))
+
+ def total_seconds(self):
+ # GH 10939
+ return np.nan
fields = ['year', 'quarter', 'month', 'day', 'hour',
@@ -673,7 +677,7 @@ def _make_nan_func(func_name):
_nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today']
-_nan_methods = ['weekday', 'isoweekday']
+_nan_methods = ['weekday', 'isoweekday', 'total_seconds']
_implemented_methods = ['to_datetime64']
_implemented_methods.extend(_nat_methods)
@@ -2412,6 +2416,12 @@ class Timedelta(_Timedelta):
"""
self._ensure_components()
return self._ns
+
+ def total_seconds(self):
+ """
+ Total duration of timedelta in seconds (to ns precision)
+ """
+ return 1e-9*self.value
def __setstate__(self, state):
(value) = state
| Implements a Series.dt.total_seconds method for timedelta64 Series.
closes #10817
| https://api.github.com/repos/pandas-dev/pandas/pulls/10939 | 2015-08-30T12:44:17Z | 2015-09-02T11:52:40Z | 2015-09-02T11:52:39Z | 2015-09-02T20:13:16Z |
updating docs for the new sorting mechanisms - GH #10886 | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index 1714e00030026..359ec76533520 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -157,7 +157,7 @@ Sorting by values
.. ipython:: python
- df.sort(columns='B')
+ df.sort_values(by='B')
Selection
---------
@@ -680,7 +680,7 @@ Sorting is per order in the categories, not lexical order.
.. ipython:: python
- df.sort("grade")
+ df.sort_values(by="grade")
Grouping by a categorical column shows also empty categories.
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index 850f59c2713eb..973674fe62745 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -286,7 +286,7 @@ As usual, **both sides** of the slicers are included as this is label indexing.
names=['lvl0', 'lvl1'])
dfmi = pd.DataFrame(np.arange(len(miindex)*len(micolumns)).reshape((len(miindex),len(micolumns))),
index=miindex,
- columns=micolumns).sortlevel().sortlevel(axis=1)
+ columns=micolumns).sort_index().sort_index(axis=1)
dfmi
Basic multi-index slicing using slices, lists, and labels.
@@ -458,7 +458,7 @@ correctly. You can think about breaking the axis into unique groups, where at
the hierarchical level of interest, each distinct group shares a label, but no
two have the same label. However, the ``MultiIndex`` does not enforce this:
**you are responsible for ensuring that things are properly sorted**. There is
-an important new method ``sortlevel`` to sort an axis within a ``MultiIndex``
+an important new method ``sort_index`` to sort an axis within a ``MultiIndex``
so that its labels are grouped and sorted by the original ordering of the
associated factor at that level. Note that this does not necessarily mean the
labels will be sorted lexicographically!
@@ -468,19 +468,19 @@ labels will be sorted lexicographically!
import random; random.shuffle(tuples)
s = pd.Series(np.random.randn(8), index=pd.MultiIndex.from_tuples(tuples))
s
- s.sortlevel(0)
- s.sortlevel(1)
+ s.sort_index(level=0)
+ s.sort_index(level=1)
.. _advanced.sortlevel_byname:
-Note, you may also pass a level name to ``sortlevel`` if the MultiIndex levels
+Note, you may also pass a level name to ``sort_index`` if the MultiIndex levels
are named.
.. ipython:: python
s.index.set_names(['L1', 'L2'], inplace=True)
- s.sortlevel(level='L1')
- s.sortlevel(level='L2')
+ s.sort_index(level='L1')
+ s.sort_index(level='L2')
Some indexing will work even if the data are not sorted, but will be rather
inefficient and will also return a copy of the data rather than a view:
@@ -488,14 +488,14 @@ inefficient and will also return a copy of the data rather than a view:
.. ipython:: python
s['qux']
- s.sortlevel(1)['qux']
+ s.sort_index(level=1)['qux']
On higher dimensional objects, you can sort any of the other axes by level if
they have a MultiIndex:
.. ipython:: python
- df.T.sortlevel(1, axis=1)
+ df.T.sort_index(level=1, axis=1)
The ``MultiIndex`` object has code to **explicity check the sort depth**. Thus,
if you try to index at a depth at which the index is not sorted, it will raise
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 956c90ae63034..3ea90447dd44f 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -328,7 +328,7 @@ equality to be True:
df1 = pd.DataFrame({'col':['foo', 0, np.nan]})
df2 = pd.DataFrame({'col':[np.nan, 0, 'foo']}, index=[2,1,0])
df1.equals(df2)
- df1.equals(df2.sort())
+ df1.equals(df2.sort_index())
Comparing array-like objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1489,7 +1489,7 @@ The ``by`` argument can take a list of column names, e.g.:
.. ipython:: python
- df1[['one', 'two', 'three']].sort_index(by=['one','two'])
+ df1[['one', 'two', 'three']].sort_values(by=['one','two'])
These methods have special treatment of NA values via the ``na_position``
argument:
@@ -1497,8 +1497,8 @@ argument:
.. ipython:: python
s[2] = np.nan
- s.order()
- s.order(na_position='first')
+ s.sort_values()
+ s.sort_values(na_position='first')
.. _basics.searchsorted:
@@ -1564,7 +1564,7 @@ all levels to ``by``.
.. ipython:: python
df1.columns = pd.MultiIndex.from_tuples([('a','one'),('a','two'),('b','three')])
- df1.sort_index(by=('a','two'))
+ df1.sort_values(by=('a','two'))
Copying
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index 0c63759201517..3c9b538caa555 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -280,9 +280,9 @@ meaning and certain operations are possible. If the categorical is unordered, ``
.. ipython:: python
s = pd.Series(pd.Categorical(["a","b","c","a"], ordered=False))
- s.sort()
+ s.sort_values(inplace=True)
s = pd.Series(["a","b","c","a"]).astype('category', ordered=True)
- s.sort()
+ s.sort_values(inplace=True)
s
s.min(), s.max()
@@ -302,7 +302,7 @@ This is even true for strings and numeric data:
s = pd.Series([1,2,3,1], dtype="category")
s = s.cat.set_categories([2,3,1], ordered=True)
s
- s.sort()
+ s.sort_values(inplace=True)
s
s.min(), s.max()
@@ -320,7 +320,7 @@ necessarily make the sort order the same as the categories order.
s = pd.Series([1,2,3,1], dtype="category")
s = s.cat.reorder_categories([2,3,1], ordered=True)
s
- s.sort()
+ s.sort_values(inplace=True)
s
s.min(), s.max()
@@ -349,14 +349,14 @@ The ordering of the categorical is determined by the ``categories`` of that colu
dfs = pd.DataFrame({'A' : pd.Categorical(list('bbeebbaa'), categories=['e','a','b'], ordered=True),
'B' : [1,2,1,2,2,1,2,1] })
- dfs.sort(['A', 'B'])
+ dfs.sort_values(by=['A', 'B'])
Reordering the ``categories`` changes a future sort.
.. ipython:: python
dfs['A'] = dfs['A'].cat.reorder_categories(['a','b','e'])
- dfs.sort(['A','B'])
+ dfs.sort_values(by=['A','B'])
Comparisons
-----------
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 9e7b9ad0b7582..0b05f062f5fce 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -309,7 +309,7 @@ Method 2 : sort then take first of each
.. ipython:: python
- df.sort("BBB").groupby("AAA", as_index=False).first()
+ df.sort_values(by="BBB").groupby("AAA", as_index=False).first()
Notice the same results, with the exception of the index.
@@ -410,7 +410,7 @@ Sorting
.. ipython:: python
- df.sort(('Labs', 'II'), ascending=False)
+ df.sort_values(by=('Labs', 'II'), ascending=False)
`Partial Selection, the need for sortedness;
<https://github.com/pydata/pandas/issues/2995>`__
@@ -547,7 +547,7 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to
code_groups = df.groupby('code')
- agg_n_sort_order = code_groups[['data']].transform(sum).sort('data')
+ agg_n_sort_order = code_groups[['data']].transform(sum).sort_values(by='data')
sorted_df = df.ix[agg_n_sort_order.index]
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 26aaf9c2be69d..dbf3b838593a9 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -164,9 +164,9 @@ will result in a **sorted** copy of the original DataFrame or Series:
index = pd.MultiIndex.from_product([[2,1], ['a', 'b']])
df = pd.DataFrame(np.random.randn(4), index=index, columns=['A'])
df
- all(df.unstack().stack() == df.sort())
+ all(df.unstack().stack() == df.sort_index())
-while the above code will raise a ``TypeError`` if the call to ``sort`` is
+while the above code will raise a ``TypeError`` if the call to ``sort_index`` is
removed.
.. _reshaping.stack_multiple:
@@ -206,7 +206,7 @@ Missing Data
These functions are intelligent about handling missing data and do not expect
each subgroup within the hierarchical index to have the same set of labels.
They also can handle the index being unsorted (but you can make it sorted by
-calling ``sortlevel``, of course). Here is a more complex example:
+calling ``sort_index``, of course). Here is a more complex example:
.. ipython:: python
diff --git a/doc/source/whatsnew/v0.13.1.txt b/doc/source/whatsnew/v0.13.1.txt
index 64ca1612f00c1..349acf508bbf3 100644
--- a/doc/source/whatsnew/v0.13.1.txt
+++ b/doc/source/whatsnew/v0.13.1.txt
@@ -120,7 +120,8 @@ API changes
equal. (:issue:`5283`) See also :ref:`the docs<basics.equals>` for a motivating example.
.. ipython:: python
-
+ :okwarning:
+
df = DataFrame({'col':['foo', 0, np.nan]})
df2 = DataFrame({'col':[np.nan, 0, 'foo']}, index=[2,1,0])
df.equals(df2)
diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt
index 01dc8bb080726..a33e0f19961ab 100644
--- a/doc/source/whatsnew/v0.15.0.txt
+++ b/doc/source/whatsnew/v0.15.0.txt
@@ -67,7 +67,8 @@ For full docs, see the :ref:`categorical introduction <categorical>` and the
:ref:`API documentation <api.categorical>`.
.. ipython:: python
-
+ :okwarning:
+
df = DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = df["raw_grade"].astype("category")
diff --git a/doc/source/whatsnew/v0.7.3.txt b/doc/source/whatsnew/v0.7.3.txt
index afb4b8faac2cc..21aa16e5fcb06 100644
--- a/doc/source/whatsnew/v0.7.3.txt
+++ b/doc/source/whatsnew/v0.7.3.txt
@@ -83,6 +83,7 @@ When calling ``apply`` on a grouped Series, the return value will also be a
Series, to be more consistent with the ``groupby`` behavior with DataFrame:
.. ipython:: python
+ :okwarning:
df = DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
@@ -93,4 +94,3 @@ Series, to be more consistent with the ``groupby`` behavior with DataFrame:
grouped = df.groupby('A')['C']
grouped.describe()
grouped.apply(lambda x: x.order()[-2:]) # top 2 values
-
diff --git a/doc/source/whatsnew/v0.9.1.txt b/doc/source/whatsnew/v0.9.1.txt
index 6718a049a0ab9..ce7439b8ecd92 100644
--- a/doc/source/whatsnew/v0.9.1.txt
+++ b/doc/source/whatsnew/v0.9.1.txt
@@ -21,6 +21,7 @@ New features
specified in a per-column manner to support multiple sort orders (:issue:`928`)
.. ipython:: python
+ :okwarning:
df = DataFrame(np.random.randint(0, 2, (6, 3)), columns=['A', 'B', 'C'])
@@ -66,7 +67,7 @@ New features
.. ipython:: python
df[df>0]
-
+
df.where(df>0)
df.where(df>0,-df)
| closes #10886
I didn't notice warnings or errors associated with those modifications when building the docs.
No more `FutureWarning` either (except in the v. 0.13 what's new, which is legit)...
| https://api.github.com/repos/pandas-dev/pandas/pulls/10937 | 2015-08-30T11:17:01Z | 2015-08-31T01:03:13Z | 2015-08-31T01:03:13Z | 2015-08-31T08:18:13Z |
DOC: clarification on -b flag in asv | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 2c9b6a0a889f4..5d26ca2414690 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -517,7 +517,7 @@ performance regressions. *pandas* is in the process of migrating to the
`asv library <https://github.com/spacetelescope/asv>`__
to enable easy monitoring of the performance of critical *pandas* operations.
These benchmarks are all found in the ``pandas/asv_bench`` directory. *asv*
-supports both python2 and python3.
+supports both python2 and python3.
.. note::
@@ -525,7 +525,10 @@ supports both python2 and python3.
so many stylistic issues are likely a result of automated transformation of the
code.
-To install asv::
+To use ''asv'' you will need either ''conda'' or ''virtualenv''. For more details
+please check installation webpage http://asv.readthedocs.org/en/latest/installing.html
+
+To install ''asv''::
pip install git+https://github.com/spacetelescope/asv
@@ -546,6 +549,25 @@ to the Pull Request to show that the committed changes do not cause unexpected
performance regressions.
You can run specific benchmarks using the *-b* flag which takes a regular expression.
+For example this will only run tests from a ``pandas/asv_bench/benchmarks/groupby.py``
+file::
+
+ asv continuous master -b groupby
+
+If you want to run only some specific group of tests from a file you can do it
+using ``.`` as a separator. For example::
+
+ asv continuous master -b groupby.groupby_agg_builtins1
+
+will only run a ``groupby_agg_builtins1`` test defined in a ``groupby`` file.
+
+It is also useful to run tests in your current environment. You can simply do it by::
+
+ asv dev
+
+which would be equivalent to ``asv run --quick --show-stderr --python=same``. This
+will launch every test only once, display stderr from the benchmarks and use your
+local ``python'' that comes from your $PATH.
Information on how to write a benchmark can be found in
`*asv*'s documentation http://asv.readthedocs.org/en/latest/writing_benchmarks.html`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10935 | 2015-08-30T11:06:37Z | 2015-08-31T01:05:17Z | 2015-08-31T01:05:17Z | 2015-08-31T01:05:22Z | |
Add tests to ensure sort preserved by groupby, add docs | diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index acddf1bb3fe30..b5a382ce24342 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -160,6 +160,31 @@ only verifies that you've passed a valid mapping.
GroupBy operations (though can't be guaranteed to be the most
efficient). You can get quite creative with the label mapping functions.
+.. _groupby.sorting:
+
+GroupBy sorting
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default the group keys are sorted during the ``groupby`` operation. You may however pass ``sort=False`` for potential speedups:
+
+.. ipython:: python
+
+ df2 = pd.DataFrame({'X' : ['B', 'B', 'A', 'A'], 'Y' : [1, 2, 3, 4]})
+ df2.groupby(['X']).sum()
+ df2.groupby(['X'], sort=False).sum()
+
+
+Note that ``groupby`` will preserve the order in which *observations* are sorted *within* each group. For example, the groups created by ``groupby()`` below are in the order the appeared in the original ``DataFrame``:
+
+.. ipython:: python
+
+ df3 = pd.DataFrame({'X' : ['A', 'B', 'A', 'B'], 'Y' : [1, 4, 3, 2]})
+ df3.groupby(['X']).get_group('A')
+
+ df3.groupby(['X']).get_group('B')
+
+
+
.. _groupby.attributes:
GroupBy object attributes
@@ -183,14 +208,6 @@ the length of the ``groups`` dict, so it is largely just a convenience:
grouped.groups
len(grouped)
-By default the group keys are sorted during the groupby operation. You may
-however pass ``sort=False`` for potential speedups:
-
-.. ipython:: python
-
- df2 = pd.DataFrame({'X' : ['B', 'B', 'A', 'A'], 'Y' : [1, 2, 3, 4]})
- df2.groupby(['X'], sort=True).sum()
- df2.groupby(['X'], sort=False).sum()
.. _groupby.tabcompletion:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d3a63f9f5d851..958bd2933d63b 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3247,11 +3247,13 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output
sort : boolean, default True
- Sort group keys. Get better performance by turning this off
+ Sort group keys. Get better performance by turning this off.
+ Note this does not influence the order of observations within each group.
+ groupby preserves the order of rows within each group.
group_keys : boolean, default True
When calling apply, add group keys to index to identify pieces
squeeze : boolean, default False
- reduce the dimensionaility of the return type if possible,
+ reduce the dimensionality of the return type if possible,
otherwise return a consistent type
Examples
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index f7b6f947d8924..f5693983f1cc1 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -5436,6 +5436,32 @@ def test_first_last_max_min_on_time_data(self):
assert_frame_equal(grouped_ref.first(),grouped_test.first())
assert_frame_equal(grouped_ref.last(),grouped_test.last())
+ def test_groupby_preserves_sort(self):
+ # Test to ensure that groupby always preserves sort order of original
+ # object. Issue #8588 and #9651
+
+ df = DataFrame({'int_groups':[3,1,0,1,0,3,3,3],
+ 'string_groups':['z','a','z','a','a','g','g','g'],
+ 'ints':[8,7,4,5,2,9,1,1],
+ 'floats':[2.3,5.3,6.2,-2.4,2.2,1.1,1.1,5],
+ 'strings':['z','d','a','e','word','word2','42','47']})
+
+ # Try sorting on different types and with different group types
+ for sort_column in ['ints', 'floats', 'strings', ['ints','floats'],
+ ['ints','strings']]:
+ for group_column in ['int_groups', 'string_groups',
+ ['int_groups','string_groups']]:
+
+ df = df.sort_values(by=sort_column)
+
+ g = df.groupby(group_column)
+
+ def test_sort(x):
+ assert_frame_equal(x, x.sort_values(by=sort_column))
+
+ g.apply(test_sort)
+
+
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
| xref #9651
closes #8588
Adds test to ensure the sort of a target object is preserved within `groupby()` groups, modifies docs to make it clear sort is preserved within groups.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10931 | 2015-08-29T18:16:32Z | 2015-09-05T22:20:25Z | 2015-09-05T22:20:25Z | 2015-09-05T22:20:28Z |
[DEPR]: Deprecate setting nans in categories | diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index a449639f1560e..a0f9383336940 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -1,5 +1,5 @@
from .pandas_vb_common import *
-
+import string
class concat_categorical(object):
goal_time = 0.2
@@ -25,3 +25,21 @@ def time_value_counts(self):
def time_value_counts_dropna(self):
self.ts.value_counts(dropna=True)
+
+class categorical_constructor(object):
+ goal_time = 0.2
+
+ def setup(self):
+ n = 5
+ N = 1e6
+ self.categories = list(string.ascii_letters[:n])
+ self.cat_idx = Index(self.categories)
+ self.values = np.tile(self.categories, N)
+ self.codes = np.tile(range(n), N)
+
+ def time_regular_constructor(self):
+ Categorical(self.values, self.categories)
+
+ def time_fastpath(self):
+ Categorical(self.codes, self.cat_idx, fastpath=True)
+
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst
index 3c9b538caa555..534ab0e343398 100644
--- a/doc/source/categorical.rst
+++ b/doc/source/categorical.rst
@@ -632,41 +632,35 @@ Missing Data
pandas primarily uses the value `np.nan` to represent missing data. It is by
default not included in computations. See the :ref:`Missing Data section
-<missing_data>`
+<missing_data>`.
-There are two ways a `np.nan` can be represented in categorical data: either the value is not
-available ("missing value") or `np.nan` is a valid category.
+Missing values should **not** be included in the Categorical's ``categories``,
+only in the ``values``.
+Instead, it is understood that NaN is different, and is always a possibility.
+When working with the Categorical's ``codes``, missing values will always have
+a code of ``-1``.
.. ipython:: python
s = pd.Series(["a","b",np.nan,"a"], dtype="category")
# only two categories
s
- s2 = pd.Series(["a","b","c","a"], dtype="category")
- s2.cat.categories = [1,2,np.nan]
- # three categories, np.nan included
- s2
+ s.codes
-.. note::
- As integer `Series` can't include NaN, the categories were converted to `object`.
-.. note::
- Missing value methods like ``isnull`` and ``fillna`` will take both missing values as well as
- `np.nan` categories into account:
+Methods for working with missing data, e.g. :meth:`~Series.isnull`, :meth:`~Series.fillna`,
+:meth:`~Series.dropna`, all work normally:
.. ipython:: python
c = pd.Series(["a","b",np.nan], dtype="category")
- c.cat.set_categories(["a","b",np.nan], inplace=True)
- # will be inserted as a NA category:
- c[0] = np.nan
s = pd.Series(c)
s
pd.isnull(s)
s.fillna("a")
Differences to R's `factor`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------
The following differences to R's factor functions can be observed:
@@ -677,6 +671,9 @@ The following differences to R's factor functions can be observed:
* In contrast to R's `factor` function, using categorical data as the sole input to create a
new categorical series will *not* remove unused categories but create a new categorical series
which is equal to the passed in one!
+* R allows for missing values to be included in its `levels` (pandas' `categories`). Pandas
+ does not allow `NaN` categories, but missing values can still be in the `values`.
+
Gotchas
-------
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index eae33bc80be32..424be6d949f13 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -652,6 +652,7 @@ Deprecations
===================== =================================
- ``Categorical.name`` was deprecated to make ``Categorical`` more ``numpy.ndarray`` like. Use ``Series(cat, name="whatever")`` instead (:issue:`10482`).
+- Setting missing values (NaN) in a ``Categorical``'s ``categories`` will issue a warning (:issue:`10748`). You can still have missing values in the ``values``.
- ``drop_duplicates`` and ``duplicated``'s ``take_last`` keyword was deprecated in favor of ``keep``. (:issue:`6511`, :issue:`8505`)
- ``Series.nsmallest`` and ``nlargest``'s ``take_last`` keyword was deprecated in favor of ``keep``. (:issue:`10792`)
- ``DataFrame.combineAdd`` and ``DataFrame.combineMult`` are deprecated. They
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 6d1c89a7a2f89..fe9bac7f4c68e 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -392,6 +392,7 @@ def argmin(self, axis=None):
"""
return nanops.nanargmin(self.values)
+ @cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return com.isnull(self).any()
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 9951024ffe218..4a6a26f21b5bf 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -207,7 +207,7 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F
if fastpath:
# fast path
self._codes = _coerce_indexer_dtype(values, categories)
- self.categories = categories
+ self._categories = self._validate_categories(categories, fastpath=isinstance(categories, ABCIndexClass))
self._ordered = ordered
return
@@ -274,6 +274,8 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F
### FIXME ####
raise NotImplementedError("> 1 ndim Categorical are not supported at this time")
+ categories = self._validate_categories(categories)
+
else:
# there were two ways if categories are present
# - the old one, where each value is a int pointer to the levels array -> not anymore
@@ -282,7 +284,6 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F
# make sure that we always have the same type here, no matter what we get passed in
categories = self._validate_categories(categories)
-
codes = _get_codes_for_values(values, categories)
# TODO: check for old style usage. These warnings should be removes after 0.18/ in 2016
@@ -295,7 +296,7 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F
"'Categorical.from_codes(codes, categories)'?", RuntimeWarning, stacklevel=2)
self.set_ordered(ordered or False, inplace=True)
- self.categories = categories
+ self._categories = categories
self._codes = _coerce_indexer_dtype(codes, categories)
def copy(self):
@@ -421,9 +422,15 @@ def _get_labels(self):
_categories = None
@classmethod
- def _validate_categories(cls, categories):
+ def _validate_categories(cls, categories, fastpath=False):
"""
Validates that we have good categories
+
+ Parameters
+ ----------
+ fastpath : boolean (default: False)
+ Don't perform validation of the categories for uniqueness or nulls
+
"""
if not isinstance(categories, ABCIndexClass):
dtype = None
@@ -439,16 +446,40 @@ def _validate_categories(cls, categories):
from pandas import Index
categories = Index(categories, dtype=dtype)
- if not categories.is_unique:
- raise ValueError('Categorical categories must be unique')
+
+ if not fastpath:
+
+ # check properties of the categories
+ # we don't allow NaNs in the categories themselves
+
+ if categories.hasnans:
+ # NaNs in cats deprecated in 0.17, remove in 0.18 or 0.19 GH 10748
+ msg = ('\nSetting NaNs in `categories` is deprecated and '
+ 'will be removed in a future version of pandas.')
+ warn(msg, FutureWarning, stacklevel=5)
+
+ # categories must be unique
+
+ if not categories.is_unique:
+ raise ValueError('Categorical categories must be unique')
+
return categories
- def _set_categories(self, categories):
- """ Sets new categories """
- categories = self._validate_categories(categories)
- if not self._categories is None and len(categories) != len(self._categories):
+ def _set_categories(self, categories, fastpath=False):
+ """ Sets new categories
+
+ Parameters
+ ----------
+ fastpath : boolean (default: False)
+ Don't perform validation of the categories for uniqueness or nulls
+
+ """
+
+ categories = self._validate_categories(categories, fastpath=fastpath)
+ if not fastpath and not self._categories is None and len(categories) != len(self._categories):
raise ValueError("new categories need to have the same number of items than the old "
"categories!")
+
self._categories = categories
def _get_categories(self):
@@ -581,11 +612,10 @@ def set_categories(self, new_categories, ordered=None, rename=False, inplace=Fal
if not cat._categories is None and len(new_categories) < len(cat._categories):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_categories)] = -1
- cat._categories = new_categories
else:
values = cat.__array__()
cat._codes = _get_codes_for_values(values, new_categories)
- cat._categories = new_categories
+ cat._categories = new_categories
if ordered is None:
ordered = self.ordered
@@ -706,9 +736,8 @@ def add_categories(self, new_categories, inplace=False):
msg = "new categories must not include old categories: %s" % str(already_included)
raise ValueError(msg)
new_categories = list(self._categories) + list(new_categories)
- new_categories = self._validate_categories(new_categories)
cat = self if inplace else self.copy()
- cat._categories = new_categories
+ cat._categories = self._validate_categories(new_categories)
cat._codes = _coerce_indexer_dtype(cat._codes, new_categories)
if not inplace:
return cat
@@ -1171,7 +1200,7 @@ def order(self, inplace=False, ascending=True, na_position='last'):
Category.sort
"""
warn("order is deprecated, use sort_values(...)",
- FutureWarning, stacklevel=2)
+ FutureWarning, stacklevel=3)
return self.sort_values(inplace=inplace, ascending=ascending, na_position=na_position)
def sort(self, inplace=True, ascending=True, na_position='last'):
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 05da93a4fca0f..d847638ff105e 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -129,7 +129,8 @@ def f():
Categorical(["a","b"], ["a","b","b"])
self.assertRaises(ValueError, f)
def f():
- Categorical([1,2], [1,2,np.nan, np.nan])
+ with tm.assert_produces_warning(FutureWarning):
+ Categorical([1,2], [1,2,np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
@@ -187,17 +188,21 @@ def f():
cat = pd.Categorical([np.nan, 1., 2., 3. ])
self.assertTrue(com.is_float_dtype(cat.categories))
+ # Deprecating NaNs in categoires (GH #10748)
# preserve int as far as possible by converting to object if NaN is in categories
- cat = pd.Categorical([np.nan, 1, 2, 3], categories=[np.nan, 1, 2, 3])
+ with tm.assert_produces_warning(FutureWarning):
+ cat = pd.Categorical([np.nan, 1, 2, 3], categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember the original type"
# feature to try to cast the array interface result to...
#vals = np.asarray(cat[cat.notnull()])
#self.assertTrue(com.is_integer_dtype(vals))
- cat = pd.Categorical([np.nan,"a", "b", "c"], categories=[np.nan,"a", "b", "c"])
+ with tm.assert_produces_warning(FutureWarning):
+ cat = pd.Categorical([np.nan,"a", "b", "c"], categories=[np.nan,"a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
- cat = pd.Categorical([np.nan, 1., 2., 3.], categories=[np.nan, 1., 2., 3.])
+ with tm.assert_produces_warning(FutureWarning):
+ cat = pd.Categorical([np.nan, 1., 2., 3.], categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
@@ -465,8 +470,9 @@ def test_describe(self):
tm.assert_frame_equal(desc, expected)
# NA as a category
- cat = pd.Categorical(["a","c","c",np.nan], categories=["b","a","c",np.nan])
- result = cat.describe()
+ with tm.assert_produces_warning(FutureWarning):
+ cat = pd.Categorical(["a","c","c",np.nan], categories=["b","a","c",np.nan])
+ result = cat.describe()
expected = DataFrame([[0,0],[1,0.25],[2,0.5],[1,0.25]],
columns=['counts','freqs'],
@@ -474,8 +480,9 @@ def test_describe(self):
tm.assert_frame_equal(result,expected)
# NA as an unused category
- cat = pd.Categorical(["a","c","c"], categories=["b","a","c",np.nan])
- result = cat.describe()
+ with tm.assert_produces_warning(FutureWarning):
+ cat = pd.Categorical(["a","c","c"], categories=["b","a","c",np.nan])
+ result = cat.describe()
expected = DataFrame([[0,0],[1,1/3.],[2,2/3.],[0,0]],
columns=['counts','freqs'],
@@ -827,29 +834,37 @@ def test_nan_handling(self):
self.assert_numpy_array_equal(c._codes , np.array([0,-1,-1,0]))
# If categories have nan included, the code should point to that instead
- c = Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan])
- self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_))
- self.assert_numpy_array_equal(c._codes , np.array([0,1,2,0]))
+ with tm.assert_produces_warning(FutureWarning):
+ c = Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan])
+ self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
+ dtype=np.object_))
+ self.assert_numpy_array_equal(c._codes, np.array([0,1,2,0]))
c[1] = np.nan
- self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_))
- self.assert_numpy_array_equal(c._codes , np.array([0,2,2,0]))
+ self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
+ dtype=np.object_))
+ self.assert_numpy_array_equal(c._codes, np.array([0,2,2,0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a","b","c","a"])
- c.categories = ["a","b",np.nan]
- self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_))
- self.assert_numpy_array_equal(c._codes , np.array([0,1,2,0]))
+ with tm.assert_produces_warning(FutureWarning):
+ c.categories = ["a","b",np.nan]
+ self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
+ dtype=np.object_))
+ self.assert_numpy_array_equal(c._codes, np.array([0,1,2,0]))
# Adding nan to categories should make assigned nan point to the category!
c = Categorical(["a","b",np.nan,"a"])
self.assert_numpy_array_equal(c.categories , np.array(["a","b"]))
self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))
- c.set_categories(["a","b",np.nan], rename=True, inplace=True)
- self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_))
- self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))
+ with tm.assert_produces_warning(FutureWarning):
+ c.set_categories(["a","b",np.nan], rename=True, inplace=True)
+ self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
+ dtype=np.object_))
+ self.assert_numpy_array_equal(c._codes, np.array([0,1,-1,0]))
c[1] = np.nan
- self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_))
- self.assert_numpy_array_equal(c._codes , np.array([0,2,-1,0]))
+ self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],
+ dtype=np.object_))
+ self.assert_numpy_array_equal(c._codes, np.array([0,2,-1,0]))
# Remove null categories (GH 10156)
cases = [
@@ -861,17 +876,22 @@ def test_nan_handling(self):
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
- base = Categorical([], with_null)
+ with tm.assert_produces_warning(FutureWarning):
+ base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
- self.assert_categorical_equal(result, expected)
+ self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
- self.assertRaises(ValueError, lambda: Categorical([], categories=nulls))
+
+ def f():
+ with tm.assert_produces_warning(FutureWarning):
+ Categorical([], categories=nulls)
+ self.assertRaises(ValueError, f)
def test_isnull(self):
@@ -880,14 +900,16 @@ def test_isnull(self):
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
- c = Categorical(["a","b",np.nan], categories=["a","b",np.nan])
+ with tm.assert_produces_warning(FutureWarning):
+ c = Categorical(["a","b",np.nan], categories=["a","b",np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a","b",np.nan])
- c.set_categories(["a","b",np.nan], rename=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ c.set_categories(["a","b",np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
@@ -1087,31 +1109,36 @@ def test_set_item_nan(self):
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
- cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0,3,2,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
- cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0,3,3,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
- cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0,3,0,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
- cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0,3,3,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2, np.nan, 3], categories=[1,2,3])
- cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0,1,3,2])
self.assert_numpy_array_equal(cat.codes, exp)
@@ -1555,14 +1582,16 @@ def test_nan_handling(self):
self.assert_numpy_array_equal(s.values.codes, np.array([0,1,-1,0]))
# If categories have nan included, the label should point to that instead
- s2 = Series(Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan]))
+ with tm.assert_produces_warning(FutureWarning):
+ s2 = Series(Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan]))
self.assert_numpy_array_equal(s2.cat.categories,
np.array(["a","b",np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0,1,2,0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a","b","c","a"]))
- s3.cat.categories = ["a","b",np.nan]
+ with tm.assert_produces_warning(FutureWarning):
+ s3.cat.categories = ["a","b",np.nan]
self.assert_numpy_array_equal(s3.cat.categories,
np.array(["a","b",np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0,1,2,0]))
@@ -2415,28 +2444,32 @@ def test_value_counts_with_nan(self):
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3], index=["a", "b", np.nan]))
- s = pd.Series(pd.Categorical(["a", "b", "a"], categories=["a", "b", np.nan]))
- tm.assert_series_equal(
- s.value_counts(dropna=True),
- pd.Series([2, 1], index=["a", "b"]))
- tm.assert_series_equal(
- s.value_counts(dropna=False),
- pd.Series([2, 1, 0], index=["a", "b", np.nan]))
+ with tm.assert_produces_warning(FutureWarning):
+ s = pd.Series(pd.Categorical(["a", "b", "a"], categories=["a", "b", np.nan]))
+ tm.assert_series_equal(
+ s.value_counts(dropna=True),
+ pd.Series([2, 1], index=["a", "b"]))
+ tm.assert_series_equal(
+ s.value_counts(dropna=False),
+ pd.Series([2, 1, 0], index=["a", "b", np.nan]))
- s = pd.Series(pd.Categorical(["a", "b", None, "a", None, None], categories=["a", "b", np.nan]))
- tm.assert_series_equal(
- s.value_counts(dropna=True),
- pd.Series([2, 1], index=["a", "b"]))
- tm.assert_series_equal(
- s.value_counts(dropna=False),
- pd.Series([3, 2, 1], index=[np.nan, "a", "b"]))
+ with tm.assert_produces_warning(FutureWarning):
+ s = pd.Series(pd.Categorical(["a", "b", None, "a", None, None],
+ categories=["a", "b", np.nan]))
+ tm.assert_series_equal(
+ s.value_counts(dropna=True),
+ pd.Series([2, 1], index=["a", "b"]))
+ tm.assert_series_equal(
+ s.value_counts(dropna=False),
+ pd.Series([3, 2, 1], index=[np.nan, "a", "b"]))
def test_groupby(self):
cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], categories=["a","b","c","d"], ordered=True)
data = DataFrame({"a":[1,1,1,2,2,2,3,4,5], "b":cats})
- expected = DataFrame({ 'a' : Series([1,2,4,np.nan],index=Index(['a','b','c','d'],name='b')) })
+ expected = DataFrame({'a': Series([1, 2, 4, np.nan],
+ index=Index(['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
@@ -3454,10 +3487,12 @@ def f():
# make sure that fillna takes both missing values and NA categories into account
c = Categorical(["a","b",np.nan])
- c.set_categories(["a","b",np.nan], rename=True, inplace=True)
+ with tm.assert_produces_warning(FutureWarning):
+ c.set_categories(["a","b",np.nan], rename=True, inplace=True)
c[0] = np.nan
df = pd.DataFrame({"cats":c, "vals":[1,2,3]})
df_exp = pd.DataFrame({"cats": Categorical(["a","b","a"]), "vals": [1,2,3]})
+
res = df.fillna("a")
tm.assert_frame_equal(res, df_exp)
| WIP still
Closes https://github.com/pydata/pandas/issues/10748
I have to run for now, but will pick this up later today.
I think I'm missing a few in the tests, since the warning is showing up in a bunch of places (there's a way to convert those to errors for testing right?)
I had to refactor a couple function that were setting `._categories` directly instead of using `Categorical._set_categories`. I kept that in a separate commit. Could do a bit more refactoring with the `validate_categories` stuff, but that can be separate.
And I need to figure out the proper `stacklevel` for this warning. I think I used 3 for now.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10929 | 2015-08-29T13:22:00Z | 2015-09-01T19:18:15Z | 2015-09-01T19:18:14Z | 2017-04-05T02:06:34Z |
Updates for asv suite | diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json
index 760db2086b125..239f9aa19f769 100644
--- a/asv_bench/asv.conf.json
+++ b/asv_bench/asv.conf.json
@@ -18,7 +18,7 @@
// If missing or the empty string, the tool will be automatically
// determined by looking for tools on the PATH environment
// variable.
- "environment_type": "conda",
+ "environment_type": "",
// the base URL to show a commit for the project.
"show_commit_url": "https://github.com/pydata/pandas/commit/",
@@ -26,7 +26,7 @@
// The Pythons you'd like to test against. If not provided, defaults
// to the current version of Python used to run `asv`.
// "pythons": ["2.7", "3.4"],
- "pythons": ["2.7"],
+ "pythons": ["2.7", "3.4"],
// The matrix of dependencies to test. Each key is the name of a
// package (in PyPI) and the values are version numbers. An empty
@@ -41,7 +41,10 @@
"sqlalchemy": [],
"scipy": [],
"numexpr": [],
- "pytables": [],
+ "tables": [],
+ "openpyxl": [],
+ "xlrd": [],
+ "xlwt": []
},
// The directory (relative to the current directory) that benchmarks are
diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py
index ecb91923dc663..2b10cb88a3134 100644
--- a/asv_bench/benchmarks/attrs_caching.py
+++ b/asv_bench/benchmarks/attrs_caching.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
class getattr_dataframe_index(object):
diff --git a/asv_bench/benchmarks/binary_ops.py b/asv_bench/benchmarks/binary_ops.py
index 13976014ec6f1..187101b1f392b 100644
--- a/asv_bench/benchmarks/binary_ops.py
+++ b/asv_bench/benchmarks/binary_ops.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
import pandas.computation.expressions as expr
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index 80b277336df7a..a449639f1560e 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
class concat_categorical(object):
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index b48211b3db83e..265ffbc7261ca 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
class frame_constructor_ndarray(object):
diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py
index 397312355aa47..719d92567a7be 100644
--- a/asv_bench/benchmarks/eval.py
+++ b/asv_bench/benchmarks/eval.py
@@ -1,6 +1,6 @@
-from pandas_vb_common import *
-import pandas.computation.expressions as expr
+from .pandas_vb_common import *
import pandas as pd
+import pandas.computation.expressions as expr
class eval_frame_add_all_threads(object):
diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 2cb337e0e6b9d..85f3c1628bd8b 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
try:
from pandas.tseries.offsets import *
except:
@@ -9,1611 +9,1611 @@ class frame_ctor_dtindex_BDayx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BDay(1, **{}))
+ self.idx = self.get_index_for_offset(BDay(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BDayx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BDayx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BDay(2, **{}))
+ self.idx = self.get_index_for_offset(BDay(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BDayx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BMonthBeginx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BMonthBegin(1, **{}))
+ self.idx = self.get_index_for_offset(BMonthBegin(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BMonthBeginx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BMonthBeginx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BMonthBegin(2, **{}))
+ self.idx = self.get_index_for_offset(BMonthBegin(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BMonthBeginx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BMonthEndx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BMonthEnd(1, **{}))
+ self.idx = self.get_index_for_offset(BMonthEnd(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BMonthEndx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BMonthEndx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BMonthEnd(2, **{}))
+ self.idx = self.get_index_for_offset(BMonthEnd(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BMonthEndx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BQuarterBeginx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BQuarterBegin(1, **{}))
+ self.idx = self.get_index_for_offset(BQuarterBegin(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BQuarterBeginx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BQuarterBeginx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BQuarterBegin(2, **{}))
+ self.idx = self.get_index_for_offset(BQuarterBegin(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BQuarterBeginx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BQuarterEndx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BQuarterEnd(1, **{}))
+ self.idx = self.get_index_for_offset(BQuarterEnd(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BQuarterEndx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BQuarterEndx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BQuarterEnd(2, **{}))
+ self.idx = self.get_index_for_offset(BQuarterEnd(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BQuarterEndx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BYearBeginx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BYearBegin(1, **{}))
+ self.idx = self.get_index_for_offset(BYearBegin(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BYearBeginx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BYearBeginx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BYearBegin(2, **{}))
+ self.idx = self.get_index_for_offset(BYearBegin(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BYearBeginx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BYearEndx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BYearEnd(1, **{}))
+ self.idx = self.get_index_for_offset(BYearEnd(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BYearEndx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BYearEndx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BYearEnd(2, **{}))
+ self.idx = self.get_index_for_offset(BYearEnd(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BYearEndx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BusinessDayx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BusinessDay(1, **{}))
+ self.idx = self.get_index_for_offset(BusinessDay(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BusinessDayx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BusinessDayx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BusinessDay(2, **{}))
+ self.idx = self.get_index_for_offset(BusinessDay(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BusinessDayx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BusinessHourx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BusinessHour(1, **{}))
+ self.idx = self.get_index_for_offset(BusinessHour(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BusinessHourx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_BusinessHourx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(BusinessHour(2, **{}))
+ self.idx = self.get_index_for_offset(BusinessHour(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_BusinessHourx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_CBMonthBeginx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(CBMonthBegin(1, **{}))
+ self.idx = self.get_index_for_offset(CBMonthBegin(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_CBMonthBeginx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_CBMonthBeginx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(CBMonthBegin(2, **{}))
+ self.idx = self.get_index_for_offset(CBMonthBegin(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_CBMonthBeginx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_CBMonthEndx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(CBMonthEnd(1, **{}))
+ self.idx = self.get_index_for_offset(CBMonthEnd(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_CBMonthEndx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_CBMonthEndx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(CBMonthEnd(2, **{}))
+ self.idx = self.get_index_for_offset(CBMonthEnd(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_CBMonthEndx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_CDayx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(CDay(1, **{}))
+ self.idx = self.get_index_for_offset(CDay(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_CDayx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_CDayx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(CDay(2, **{}))
+ self.idx = self.get_index_for_offset(CDay(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_CDayx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_CustomBusinessDayx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(CustomBusinessDay(1, **{}))
+ self.idx = self.get_index_for_offset(CustomBusinessDay(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_CustomBusinessDayx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_CustomBusinessDayx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(CustomBusinessDay(2, **{}))
+ self.idx = self.get_index_for_offset(CustomBusinessDay(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_CustomBusinessDayx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_DateOffsetx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(DateOffset(1, **{}))
+ self.idx = self.get_index_for_offset(DateOffset(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_DateOffsetx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_DateOffsetx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(DateOffset(2, **{}))
+ self.idx = self.get_index_for_offset(DateOffset(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_DateOffsetx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Dayx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Day(1, **{}))
+ self.idx = self.get_index_for_offset(Day(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Dayx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Dayx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Day(2, **{}))
+ self.idx = self.get_index_for_offset(Day(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Dayx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Easterx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Easter(1, **{}))
+ self.idx = self.get_index_for_offset(Easter(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Easterx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Easterx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Easter(2, **{}))
+ self.idx = self.get_index_for_offset(Easter(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Easterx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_FY5253Quarterx1__variation_last(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(FY5253Quarter(1, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'last', }))
+ self.idx = self.get_index_for_offset(FY5253Quarter(1, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'last', }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_FY5253Quarterx1__variation_last(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_FY5253Quarterx1__variation_nearest(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(FY5253Quarter(1, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'nearest', }))
+ self.idx = self.get_index_for_offset(FY5253Quarter(1, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'nearest', }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_FY5253Quarterx1__variation_nearest(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_FY5253Quarterx2__variation_last(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(FY5253Quarter(2, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'last', }))
+ self.idx = self.get_index_for_offset(FY5253Quarter(2, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'last', }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_FY5253Quarterx2__variation_last(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_FY5253Quarterx2__variation_nearest(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(FY5253Quarter(2, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'nearest', }))
+ self.idx = self.get_index_for_offset(FY5253Quarter(2, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'nearest', }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_FY5253Quarterx2__variation_nearest(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_FY5253x1__variation_last(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(FY5253(1, **{'startingMonth': 1, 'weekday': 1, 'variation': 'last', }))
+ self.idx = self.get_index_for_offset(FY5253(1, **{'startingMonth': 1, 'weekday': 1, 'variation': 'last', }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_FY5253x1__variation_last(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_FY5253x1__variation_nearest(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(FY5253(1, **{'startingMonth': 1, 'weekday': 1, 'variation': 'nearest', }))
+ self.idx = self.get_index_for_offset(FY5253(1, **{'startingMonth': 1, 'weekday': 1, 'variation': 'nearest', }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_FY5253x1__variation_nearest(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_FY5253x2__variation_last(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(FY5253(2, **{'startingMonth': 1, 'weekday': 1, 'variation': 'last', }))
+ self.idx = self.get_index_for_offset(FY5253(2, **{'startingMonth': 1, 'weekday': 1, 'variation': 'last', }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_FY5253x2__variation_last(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_FY5253x2__variation_nearest(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(FY5253(2, **{'startingMonth': 1, 'weekday': 1, 'variation': 'nearest', }))
+ self.idx = self.get_index_for_offset(FY5253(2, **{'startingMonth': 1, 'weekday': 1, 'variation': 'nearest', }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_FY5253x2__variation_nearest(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Hourx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Hour(1, **{}))
+ self.idx = self.get_index_for_offset(Hour(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Hourx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Hourx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Hour(2, **{}))
+ self.idx = self.get_index_for_offset(Hour(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Hourx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_LastWeekOfMonthx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(LastWeekOfMonth(1, **{'week': 1, 'weekday': 1, }))
+ self.idx = self.get_index_for_offset(LastWeekOfMonth(1, **{'week': 1, 'weekday': 1, }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_LastWeekOfMonthx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_LastWeekOfMonthx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(LastWeekOfMonth(2, **{'week': 1, 'weekday': 1, }))
+ self.idx = self.get_index_for_offset(LastWeekOfMonth(2, **{'week': 1, 'weekday': 1, }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_LastWeekOfMonthx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Microx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Micro(1, **{}))
+ self.idx = self.get_index_for_offset(Micro(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Microx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Microx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Micro(2, **{}))
+ self.idx = self.get_index_for_offset(Micro(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Microx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Millix1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Milli(1, **{}))
+ self.idx = self.get_index_for_offset(Milli(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Millix1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Millix2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Milli(2, **{}))
+ self.idx = self.get_index_for_offset(Milli(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Millix2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Minutex1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Minute(1, **{}))
+ self.idx = self.get_index_for_offset(Minute(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Minutex1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Minutex2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Minute(2, **{}))
+ self.idx = self.get_index_for_offset(Minute(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Minutex2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_MonthBeginx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(MonthBegin(1, **{}))
+ self.idx = self.get_index_for_offset(MonthBegin(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_MonthBeginx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_MonthBeginx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(MonthBegin(2, **{}))
+ self.idx = self.get_index_for_offset(MonthBegin(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_MonthBeginx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_MonthEndx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(MonthEnd(1, **{}))
+ self.idx = self.get_index_for_offset(MonthEnd(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_MonthEndx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_MonthEndx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(MonthEnd(2, **{}))
+ self.idx = self.get_index_for_offset(MonthEnd(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_MonthEndx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Nanox1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Nano(1, **{}))
+ self.idx = self.get_index_for_offset(Nano(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Nanox1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Nanox2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Nano(2, **{}))
+ self.idx = self.get_index_for_offset(Nano(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Nanox2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_QuarterBeginx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(QuarterBegin(1, **{}))
+ self.idx = self.get_index_for_offset(QuarterBegin(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_QuarterBeginx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_QuarterBeginx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(QuarterBegin(2, **{}))
+ self.idx = self.get_index_for_offset(QuarterBegin(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_QuarterBeginx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_QuarterEndx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(QuarterEnd(1, **{}))
+ self.idx = self.get_index_for_offset(QuarterEnd(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_QuarterEndx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_QuarterEndx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(QuarterEnd(2, **{}))
+ self.idx = self.get_index_for_offset(QuarterEnd(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_QuarterEndx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Secondx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Second(1, **{}))
+ self.idx = self.get_index_for_offset(Second(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Secondx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Secondx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Second(2, **{}))
+ self.idx = self.get_index_for_offset(Second(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Secondx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_WeekOfMonthx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(WeekOfMonth(1, **{'week': 1, 'weekday': 1, }))
+ self.idx = self.get_index_for_offset(WeekOfMonth(1, **{'week': 1, 'weekday': 1, }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_WeekOfMonthx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_WeekOfMonthx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(WeekOfMonth(2, **{'week': 1, 'weekday': 1, }))
+ self.idx = self.get_index_for_offset(WeekOfMonth(2, **{'week': 1, 'weekday': 1, }))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_WeekOfMonthx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Weekx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Week(1, **{}))
+ self.idx = self.get_index_for_offset(Week(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Weekx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_Weekx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(Week(2, **{}))
+ self.idx = self.get_index_for_offset(Week(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_Weekx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_YearBeginx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(YearBegin(1, **{}))
+ self.idx = self.get_index_for_offset(YearBegin(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_YearBeginx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_YearBeginx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(YearBegin(2, **{}))
+ self.idx = self.get_index_for_offset(YearBegin(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_YearBeginx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_YearEndx1(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(YearEnd(1, **{}))
+ self.idx = self.get_index_for_offset(YearEnd(1, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_YearEndx1(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_dtindex_YearEndx2(object):
goal_time = 0.2
def setup(self):
-
- def get_period_count(start_date, off):
- self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (self.ten_offsets_in_days == 0):
- return 1000
- else:
- return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
-
- def get_index_for_offset(off):
- self.start_date = Timestamp('1/1/1900')
- return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off)
- self.idx = get_index_for_offset(YearEnd(2, **{}))
+ self.idx = self.get_index_for_offset(YearEnd(2, **{}))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = dict([(col, self.df[col]) for col in self.df.columns])
def time_frame_ctor_dtindex_YearEndx2(self):
DataFrame(self.d)
+ def get_period_count(self, start_date, off):
+ self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
+ if (self.ten_offsets_in_days == 0):
+ return 1000
+ else:
+ return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000)
+
+ def get_index_for_offset(self, off):
+ self.start_date = Timestamp('1/1/1900')
+ return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off)
+
class frame_ctor_list_of_dict(object):
goal_time = 0.2
@@ -1657,7 +1657,7 @@ class frame_ctor_nested_dict_int64(object):
goal_time = 0.2
def setup(self):
- self.data = dict(((i, dict(((j, float(j)) for j in xrange(100)))) for i in xrange(2000)))
+ self.data = dict(((i, dict(((j, float(j)) for j in range(100)))) for i in xrange(2000)))
def time_frame_ctor_nested_dict_int64(self):
DataFrame(self.data)
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 2bd51201b45ca..98b0ec73fb23c 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
class frame_apply_axis_1(object):
@@ -69,12 +69,12 @@ def setup(self):
self.idx = date_range('1/1/2000', periods=100000, freq='D')
self.df = DataFrame(randn(100000, 1), columns=['A'], index=self.idx)
- def f(x):
- self.x = self.x.copy()
- self.x['date'] = self.x.index
-
def time_frame_assign_timeseries_index(self):
- f(self.df)
+ self.f(self.df)
+
+ def f(self, df):
+ self.x = self.df.copy()
+ self.x['date'] = self.x.index
class frame_boolean_row_select(object):
@@ -339,80 +339,76 @@ class frame_float_equal(object):
goal_time = 0.2
def setup(self):
-
- def make_pair(frame):
- self.df = frame
- self.df2 = self.df.copy()
- self.df2.ix[((-1), (-1))] = np.nan
- return (self.df, self.df2)
-
- def test_equal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df)
-
- def test_unequal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df2)
self.float_df = DataFrame(np.random.randn(1000, 1000))
self.object_df = DataFrame(([(['foo'] * 1000)] * 1000))
self.nonunique_cols = self.object_df.copy()
self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns))
- self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
+ self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
def time_frame_float_equal(self):
- test_equal('float_df')
+ self.test_equal('float_df')
+ def make_pair(self, frame):
+ self.df = frame
+ self.df2 = self.df.copy()
+ self.df2.ix[((-1), (-1))] = np.nan
+ return (self.df, self.df2)
-class frame_float_unequal(object):
- goal_time = 0.2
+ def test_equal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df)
- def setup(self):
+ def test_unequal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df2)
- def make_pair(frame):
- self.df = frame
- self.df2 = self.df.copy()
- self.df2.ix[((-1), (-1))] = np.nan
- return (self.df, self.df2)
- def test_equal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df)
+class frame_float_unequal(object):
+ goal_time = 0.2
- def test_unequal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df2)
+ def setup(self):
self.float_df = DataFrame(np.random.randn(1000, 1000))
self.object_df = DataFrame(([(['foo'] * 1000)] * 1000))
self.nonunique_cols = self.object_df.copy()
self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns))
- self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
+ self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
def time_frame_float_unequal(self):
- test_unequal('float_df')
+ self.test_unequal('float_df')
+ def make_pair(self, frame):
+ self.df = frame
+ self.df2 = self.df.copy()
+ self.df2.ix[((-1), (-1))] = np.nan
+ return (self.df, self.df2)
-class frame_from_records_generator(object):
- goal_time = 0.2
+ def test_equal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df)
- def setup(self):
+ def test_unequal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df2)
- def get_data(n=100000):
- return ((x, (x * 20), (x * 100)) for x in xrange(n))
+
+class frame_from_records_generator(object):
+ goal_time = 0.2
def time_frame_from_records_generator(self):
- self.df = DataFrame.from_records(get_data())
+ self.df = DataFrame.from_records(self.get_data())
+
+ def get_data(self, n=100000):
+ return ((x, (x * 20), (x * 100)) for x in range(n))
class frame_from_records_generator_nrows(object):
goal_time = 0.2
- def setup(self):
-
- def get_data(n=100000):
- return ((x, (x * 20), (x * 100)) for x in xrange(n))
-
def time_frame_from_records_generator_nrows(self):
- self.df = DataFrame.from_records(get_data(), nrows=1000)
+ self.df = DataFrame.from_records(self.get_data(), nrows=1000)
+
+ def get_data(self, n=100000):
+ return ((x, (x * 20), (x * 100)) for x in range(n))
class frame_get_dtype_counts(object):
@@ -433,26 +429,26 @@ def setup(self):
self.df2 = DataFrame(randn(3000, 1), columns=['A'])
self.df3 = DataFrame(randn(3000, 1))
- def f():
- if hasattr(self.df, '_item_cache'):
- self.df._item_cache.clear()
- for (name, col) in self.df.iteritems():
- pass
+ def time_frame_getitem_single_column(self):
+ self.h()
- def g():
- for (name, col) in self.df.iteritems():
- pass
+ def f(self):
+ if hasattr(self.df, '_item_cache'):
+ self.df._item_cache.clear()
+ for (name, col) in self.df.iteritems():
+ pass
- def h():
- for i in xrange(10000):
- self.df2['A']
+ def g(self):
+ for (name, col) in self.df.iteritems():
+ pass
- def j():
- for i in xrange(10000):
- self.df3[0]
+ def h(self):
+ for i in range(10000):
+ self.df2['A']
- def time_frame_getitem_single_column(self):
- h()
+ def j(self):
+ for i in range(10000):
+ self.df3[0]
class frame_getitem_single_column2(object):
@@ -463,26 +459,26 @@ def setup(self):
self.df2 = DataFrame(randn(3000, 1), columns=['A'])
self.df3 = DataFrame(randn(3000, 1))
- def f():
- if hasattr(self.df, '_item_cache'):
- self.df._item_cache.clear()
- for (name, col) in self.df.iteritems():
- pass
+ def time_frame_getitem_single_column2(self):
+ self.j()
- def g():
- for (name, col) in self.df.iteritems():
- pass
+ def f(self):
+ if hasattr(self.df, '_item_cache'):
+ self.df._item_cache.clear()
+ for (name, col) in self.df.iteritems():
+ pass
- def h():
- for i in xrange(10000):
- self.df2['A']
+ def g(self):
+ for (name, col) in self.df.iteritems():
+ pass
- def j():
- for i in xrange(10000):
- self.df3[0]
+ def h(self):
+ for i in range(10000):
+ self.df2['A']
- def time_frame_getitem_single_column2(self):
- j()
+ def j(self):
+ for i in range(10000):
+ self.df3[0]
class frame_html_repr_trunc_mi(object):
@@ -517,14 +513,14 @@ class frame_insert_100_columns_begin(object):
def setup(self):
self.N = 1000
- def f(K=100):
- self.df = DataFrame(index=range(self.N))
- self.new_col = np.random.randn(self.N)
- for i in range(K):
- self.df.insert(0, i, self.new_col)
-
def time_frame_insert_100_columns_begin(self):
- f()
+ self.f()
+
+ def f(self, K=100):
+ self.df = DataFrame(index=range(self.N))
+ self.new_col = np.random.randn(self.N)
+ for i in range(K):
+ self.df.insert(0, i, self.new_col)
class frame_insert_500_columns_end(object):
@@ -533,14 +529,14 @@ class frame_insert_500_columns_end(object):
def setup(self):
self.N = 1000
- def f(K=500):
- self.df = DataFrame(index=range(self.N))
- self.new_col = np.random.randn(self.N)
- for i in range(K):
- self.df[i] = self.new_col
-
def time_frame_insert_500_columns_end(self):
- f()
+ self.f()
+
+ def f(self, K=500):
+ self.df = DataFrame(index=range(self.N))
+ self.new_col = np.random.randn(self.N)
+ for i in range(K):
+ self.df[i] = self.new_col
class frame_interpolate(object):
@@ -597,26 +593,26 @@ def setup(self):
self.df2 = DataFrame(randn(3000, 1), columns=['A'])
self.df3 = DataFrame(randn(3000, 1))
- def f():
- if hasattr(self.df, '_item_cache'):
- self.df._item_cache.clear()
- for (name, col) in self.df.iteritems():
- pass
+ def time_frame_iteritems(self):
+ self.f()
- def g():
- for (name, col) in self.df.iteritems():
- pass
+ def f(self):
+ if hasattr(self.df, '_item_cache'):
+ self.df._item_cache.clear()
+ for (name, col) in self.df.iteritems():
+ pass
- def h():
- for i in xrange(10000):
- self.df2['A']
+ def g(self):
+ for (name, col) in self.df.iteritems():
+ pass
- def j():
- for i in xrange(10000):
- self.df3[0]
+ def h(self):
+ for i in range(10000):
+ self.df2['A']
- def time_frame_iteritems(self):
- f()
+ def j(self):
+ for i in range(10000):
+ self.df3[0]
class frame_iteritems_cached(object):
@@ -627,26 +623,26 @@ def setup(self):
self.df2 = DataFrame(randn(3000, 1), columns=['A'])
self.df3 = DataFrame(randn(3000, 1))
- def f():
- if hasattr(self.df, '_item_cache'):
- self.df._item_cache.clear()
- for (name, col) in self.df.iteritems():
- pass
+ def time_frame_iteritems_cached(self):
+ self.g()
- def g():
- for (name, col) in self.df.iteritems():
- pass
+ def f(self):
+ if hasattr(self.df, '_item_cache'):
+ self.df._item_cache.clear()
+ for (name, col) in self.df.iteritems():
+ pass
- def h():
- for i in xrange(10000):
- self.df2['A']
+ def g(self):
+ for (name, col) in self.df.iteritems():
+ pass
- def j():
- for i in xrange(10000):
- self.df3[0]
+ def h(self):
+ for i in range(10000):
+ self.df2['A']
- def time_frame_iteritems_cached(self):
- g()
+ def j(self):
+ for i in range(10000):
+ self.df3[0]
class frame_mask_bools(object):
@@ -681,112 +677,112 @@ class frame_nonunique_equal(object):
goal_time = 0.2
def setup(self):
-
- def make_pair(frame):
- self.df = frame
- self.df2 = self.df.copy()
- self.df2.ix[((-1), (-1))] = np.nan
- return (self.df, self.df2)
-
- def test_equal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df)
-
- def test_unequal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df2)
self.float_df = DataFrame(np.random.randn(1000, 1000))
self.object_df = DataFrame(([(['foo'] * 1000)] * 1000))
self.nonunique_cols = self.object_df.copy()
self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns))
- self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
+ self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
def time_frame_nonunique_equal(self):
- test_equal('nonunique_cols')
+ self.test_equal('nonunique_cols')
+ def make_pair(self, frame):
+ self.df = frame
+ self.df2 = self.df.copy()
+ self.df2.ix[((-1), (-1))] = np.nan
+ return (self.df, self.df2)
-class frame_nonunique_unequal(object):
- goal_time = 0.2
+ def test_equal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df)
- def setup(self):
+ def test_unequal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df2)
- def make_pair(frame):
- self.df = frame
- self.df2 = self.df.copy()
- self.df2.ix[((-1), (-1))] = np.nan
- return (self.df, self.df2)
- def test_equal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df)
+class frame_nonunique_unequal(object):
+ goal_time = 0.2
- def test_unequal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df2)
+ def setup(self):
self.float_df = DataFrame(np.random.randn(1000, 1000))
self.object_df = DataFrame(([(['foo'] * 1000)] * 1000))
self.nonunique_cols = self.object_df.copy()
self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns))
- self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
+ self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
def time_frame_nonunique_unequal(self):
- test_unequal('nonunique_cols')
+ self.test_unequal('nonunique_cols')
+ def make_pair(self, frame):
+ self.df = frame
+ self.df2 = self.df.copy()
+ self.df2.ix[((-1), (-1))] = np.nan
+ return (self.df, self.df2)
-class frame_object_equal(object):
- goal_time = 0.2
+ def test_equal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df)
- def setup(self):
+ def test_unequal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df2)
- def make_pair(frame):
- self.df = frame
- self.df2 = self.df.copy()
- self.df2.ix[((-1), (-1))] = np.nan
- return (self.df, self.df2)
- def test_equal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df)
+class frame_object_equal(object):
+ goal_time = 0.2
- def test_unequal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df2)
+ def setup(self):
self.float_df = DataFrame(np.random.randn(1000, 1000))
self.object_df = DataFrame(([(['foo'] * 1000)] * 1000))
self.nonunique_cols = self.object_df.copy()
self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns))
- self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
+ self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
def time_frame_object_equal(self):
- test_equal('object_df')
+ self.test_equal('object_df')
+ def make_pair(self, frame):
+ self.df = frame
+ self.df2 = self.df.copy()
+ self.df2.ix[((-1), (-1))] = np.nan
+ return (self.df, self.df2)
-class frame_object_unequal(object):
- goal_time = 0.2
+ def test_equal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df)
- def setup(self):
+ def test_unequal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df2)
- def make_pair(frame):
- self.df = frame
- self.df2 = self.df.copy()
- self.df2.ix[((-1), (-1))] = np.nan
- return (self.df, self.df2)
- def test_equal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df)
+class frame_object_unequal(object):
+ goal_time = 0.2
- def test_unequal(name):
- (self.df, self.df2) = pairs[name]
- return self.df.equals(self.df2)
+ def setup(self):
self.float_df = DataFrame(np.random.randn(1000, 1000))
self.object_df = DataFrame(([(['foo'] * 1000)] * 1000))
self.nonunique_cols = self.object_df.copy()
self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns))
- self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
+ self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))])
def time_frame_object_unequal(self):
- test_unequal('object_df')
+ self.test_unequal('object_df')
+
+ def make_pair(self, frame):
+ self.df = frame
+ self.df2 = self.df.copy()
+ self.df2.ix[((-1), (-1))] = np.nan
+ return (self.df, self.df2)
+
+ def test_equal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df)
+
+ def test_unequal(self, name):
+ (self.df, self.df2) = self.pairs[name]
+ return self.df.equals(self.df2)
class frame_reindex_axis0(object):
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index b0486617a52af..556dd2c364cdf 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -1,6 +1,16 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
from pandas.core import common as com
-from pandas.util.testing import test_parallel
+try:
+ from pandas.util.testing import test_parallel
+ have_real_test_parallel = True
+except ImportError:
+ have_real_test_parallel = False
+
+ def test_parallel(num_threads=1):
+
+ def wrapper(fname):
+ return fname
+ return wrapper
class nogil_groupby_count_2(object):
@@ -11,13 +21,15 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
-
- @test_parallel(num_threads=2)
- def pg2():
- self.df.groupby('key')['data'].count()
+ if (not have_real_test_parallel):
+ raise NotImplementedError
def time_nogil_groupby_count_2(self):
- pg2()
+ self.pg2()
+
+ @test_parallel(num_threads=2)
+ def pg2(self):
+ self.df.groupby('key')['data'].count()
class nogil_groupby_last_2(object):
@@ -28,13 +40,15 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
-
- @test_parallel(num_threads=2)
- def pg2():
- self.df.groupby('key')['data'].last()
+ if (not have_real_test_parallel):
+ raise NotImplementedError
def time_nogil_groupby_last_2(self):
- pg2()
+ self.pg2()
+
+ @test_parallel(num_threads=2)
+ def pg2(self):
+ self.df.groupby('key')['data'].last()
class nogil_groupby_max_2(object):
@@ -45,13 +59,15 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
-
- @test_parallel(num_threads=2)
- def pg2():
- self.df.groupby('key')['data'].max()
+ if (not have_real_test_parallel):
+ raise NotImplementedError
def time_nogil_groupby_max_2(self):
- pg2()
+ self.pg2()
+
+ @test_parallel(num_threads=2)
+ def pg2(self):
+ self.df.groupby('key')['data'].max()
class nogil_groupby_mean_2(object):
@@ -62,13 +78,15 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
-
- @test_parallel(num_threads=2)
- def pg2():
- self.df.groupby('key')['data'].mean()
+ if (not have_real_test_parallel):
+ raise NotImplementedError
def time_nogil_groupby_mean_2(self):
- pg2()
+ self.pg2()
+
+ @test_parallel(num_threads=2)
+ def pg2(self):
+ self.df.groupby('key')['data'].mean()
class nogil_groupby_min_2(object):
@@ -79,13 +97,15 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
-
- @test_parallel(num_threads=2)
- def pg2():
- self.df.groupby('key')['data'].min()
+ if (not have_real_test_parallel):
+ raise NotImplementedError
def time_nogil_groupby_min_2(self):
- pg2()
+ self.pg2()
+
+ @test_parallel(num_threads=2)
+ def pg2(self):
+ self.df.groupby('key')['data'].min()
class nogil_groupby_prod_2(object):
@@ -96,13 +116,15 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
-
- @test_parallel(num_threads=2)
- def pg2():
- self.df.groupby('key')['data'].prod()
+ if (not have_real_test_parallel):
+ raise NotImplementedError
def time_nogil_groupby_prod_2(self):
- pg2()
+ self.pg2()
+
+ @test_parallel(num_threads=2)
+ def pg2(self):
+ self.df.groupby('key')['data'].prod()
class nogil_groupby_sum_2(object):
@@ -113,13 +135,15 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
-
- @test_parallel(num_threads=2)
- def pg2():
- self.df.groupby('key')['data'].sum()
+ if (not have_real_test_parallel):
+ raise NotImplementedError
def time_nogil_groupby_sum_2(self):
- pg2()
+ self.pg2()
+
+ @test_parallel(num_threads=2)
+ def pg2(self):
+ self.df.groupby('key')['data'].sum()
class nogil_groupby_sum_4(object):
@@ -130,36 +154,38 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
+ if (not have_real_test_parallel):
+ raise NotImplementedError
- def f():
- self.df.groupby('key')['data'].sum()
+ def time_nogil_groupby_sum_4(self):
+ self.pg4()
- def g2():
- for i in range(2):
- f()
+ def f(self):
+ self.df.groupby('key')['data'].sum()
- def g4():
- for i in range(4):
- f()
+ def g2(self):
+ for i in range(2):
+ self.f()
- def g8():
- for i in range(8):
- f()
+ def g4(self):
+ for i in range(4):
+ self.f()
- @test_parallel(num_threads=2)
- def pg2():
- f()
+ def g8(self):
+ for i in range(8):
+ self.f()
- @test_parallel(num_threads=4)
- def pg4():
- f()
+ @test_parallel(num_threads=2)
+ def pg2(self):
+ self.f()
- @test_parallel(num_threads=8)
- def pg8():
- f()
+ @test_parallel(num_threads=4)
+ def pg4(self):
+ self.f()
- def time_nogil_groupby_sum_4(self):
- pg4()
+ @test_parallel(num_threads=8)
+ def pg8(self):
+ self.f()
class nogil_groupby_sum_8(object):
@@ -170,36 +196,38 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
+ if (not have_real_test_parallel):
+ raise NotImplementedError
- def f():
- self.df.groupby('key')['data'].sum()
+ def time_nogil_groupby_sum_8(self):
+ self.pg8()
- def g2():
- for i in range(2):
- f()
+ def f(self):
+ self.df.groupby('key')['data'].sum()
- def g4():
- for i in range(4):
- f()
+ def g2(self):
+ for i in range(2):
+ self.f()
- def g8():
- for i in range(8):
- f()
+ def g4(self):
+ for i in range(4):
+ self.f()
- @test_parallel(num_threads=2)
- def pg2():
- f()
+ def g8(self):
+ for i in range(8):
+ self.f()
- @test_parallel(num_threads=4)
- def pg4():
- f()
+ @test_parallel(num_threads=2)
+ def pg2(self):
+ self.f()
- @test_parallel(num_threads=8)
- def pg8():
- f()
+ @test_parallel(num_threads=4)
+ def pg4(self):
+ self.f()
- def time_nogil_groupby_sum_8(self):
- pg8()
+ @test_parallel(num_threads=8)
+ def pg8(self):
+ self.f()
class nogil_groupby_var_2(object):
@@ -210,13 +238,15 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
-
- @test_parallel(num_threads=2)
- def pg2():
- self.df.groupby('key')['data'].var()
+ if (not have_real_test_parallel):
+ raise NotImplementedError
def time_nogil_groupby_var_2(self):
- pg2()
+ self.pg2()
+
+ @test_parallel(num_threads=2)
+ def pg2(self):
+ self.df.groupby('key')['data'].var()
class nogil_take1d_float64(object):
@@ -227,20 +257,22 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
+ if (not have_real_test_parallel):
+ raise NotImplementedError
self.N = 10000000.0
self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })
self.indexer = np.arange(100, (len(self.df) - 100))
- @test_parallel(num_threads=2)
- def take_1d_pg2_int64():
- com.take_1d(self.df.int64.values, self.indexer)
+ def time_nogil_take1d_float64(self):
+ self.take_1d_pg2_int64()
- @test_parallel(num_threads=2)
- def take_1d_pg2_float64():
- com.take_1d(self.df.float64.values, self.indexer)
+ @test_parallel(num_threads=2)
+ def take_1d_pg2_int64(self):
+ com.take_1d(self.df.int64.values, self.indexer)
- def time_nogil_take1d_float64(self):
- take_1d_pg2_int64()
+ @test_parallel(num_threads=2)
+ def take_1d_pg2_float64(self):
+ com.take_1d(self.df.float64.values, self.indexer)
class nogil_take1d_int64(object):
@@ -251,17 +283,19 @@ def setup(self):
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
+ if (not have_real_test_parallel):
+ raise NotImplementedError
self.N = 10000000.0
self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })
self.indexer = np.arange(100, (len(self.df) - 100))
- @test_parallel(num_threads=2)
- def take_1d_pg2_int64():
- com.take_1d(self.df.int64.values, self.indexer)
+ def time_nogil_take1d_int64(self):
+ self.take_1d_pg2_float64()
- @test_parallel(num_threads=2)
- def take_1d_pg2_float64():
- com.take_1d(self.df.float64.values, self.indexer)
+ @test_parallel(num_threads=2)
+ def take_1d_pg2_int64(self):
+ com.take_1d(self.df.int64.values, self.indexer)
- def time_nogil_take1d_int64(self):
- take_1d_pg2_float64()
\ No newline at end of file
+ @test_parallel(num_threads=2)
+ def take_1d_pg2_float64(self):
+ com.take_1d(self.df.float64.values, self.indexer)
\ No newline at end of file
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 4f1f4e46b4a31..a84a5373651bb 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -1,6 +1,6 @@
-from pandas_vb_common import *
-from itertools import product
+from .pandas_vb_common import *
from string import ascii_letters, digits
+from itertools import product
class groupby_agg_builtins1(object):
@@ -128,11 +128,11 @@ def setup(self):
self.labels2 = np.random.randint(0, 3, size=self.N)
self.df = DataFrame({'key': self.labels, 'key2': self.labels2, 'value1': randn(self.N), 'value2': (['foo', 'bar', 'baz', 'qux'] * (self.N / 4)), })
- def f(g):
- return 1
-
def time_groupby_frame_apply(self):
- self.df.groupby(['key', 'key2']).apply(f)
+ self.df.groupby(['key', 'key2']).apply(self.f)
+
+ def f(self, g):
+ return 1
class groupby_frame_apply_overhead(object):
@@ -144,11 +144,11 @@ def setup(self):
self.labels2 = np.random.randint(0, 3, size=self.N)
self.df = DataFrame({'key': self.labels, 'key2': self.labels2, 'value1': randn(self.N), 'value2': (['foo', 'bar', 'baz', 'qux'] * (self.N / 4)), })
- def f(g):
- return 1
-
def time_groupby_frame_apply_overhead(self):
- self.df.groupby('key').apply(f)
+ self.df.groupby('key').apply(self.f)
+
+ def f(self, g):
+ return 1
class groupby_frame_cython_many_columns(object):
@@ -330,24 +330,24 @@ class groupby_multi_cython(object):
def setup(self):
self.N = 100000
self.ngroups = 100
-
- def get_test_data(ngroups=100, n=self.N):
- self.unique_groups = range(self.ngroups)
- self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
- if (len(self.arr) < n):
- self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
- random.shuffle(self.arr)
- return self.arr
- self.df = DataFrame({'key1': get_test_data(ngroups=self.ngroups), 'key2': get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
-
- def f():
- self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
+ self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
self.simple_series = Series(np.random.randn(self.N))
self.key1 = self.df['key1']
def time_groupby_multi_cython(self):
self.df.groupby(['key1', 'key2']).sum()
+ def get_test_data(self, ngroups=100, n=100000):
+ self.unique_groups = range(self.ngroups)
+ self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
+ if (len(self.arr) < n):
+ self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
+ random.shuffle(self.arr)
+ return self.arr
+
+ def f(self):
+ self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
+
class groupby_multi_different_functions(object):
goal_time = 0.2
@@ -395,24 +395,24 @@ class groupby_multi_python(object):
def setup(self):
self.N = 100000
self.ngroups = 100
-
- def get_test_data(ngroups=100, n=self.N):
- self.unique_groups = range(self.ngroups)
- self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
- if (len(self.arr) < n):
- self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
- random.shuffle(self.arr)
- return self.arr
- self.df = DataFrame({'key1': get_test_data(ngroups=self.ngroups), 'key2': get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
-
- def f():
- self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
+ self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
self.simple_series = Series(np.random.randn(self.N))
self.key1 = self.df['key1']
def time_groupby_multi_python(self):
self.df.groupby(['key1', 'key2'])['data1'].agg((lambda x: x.values.sum()))
+ def get_test_data(self, ngroups=100, n=100000):
+ self.unique_groups = range(self.ngroups)
+ self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
+ if (len(self.arr) < n):
+ self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
+ random.shuffle(self.arr)
+ return self.arr
+
+ def f(self):
+ self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
+
class groupby_multi_series_op(object):
goal_time = 0.2
@@ -420,24 +420,24 @@ class groupby_multi_series_op(object):
def setup(self):
self.N = 100000
self.ngroups = 100
-
- def get_test_data(ngroups=100, n=self.N):
- self.unique_groups = range(self.ngroups)
- self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
- if (len(self.arr) < n):
- self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
- random.shuffle(self.arr)
- return self.arr
- self.df = DataFrame({'key1': get_test_data(ngroups=self.ngroups), 'key2': get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
-
- def f():
- self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
+ self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
self.simple_series = Series(np.random.randn(self.N))
self.key1 = self.df['key1']
def time_groupby_multi_series_op(self):
self.df.groupby(['key1', 'key2'])['data1'].agg(np.std)
+ def get_test_data(self, ngroups=100, n=100000):
+ self.unique_groups = range(self.ngroups)
+ self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
+ if (len(self.arr) < n):
+ self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
+ random.shuffle(self.arr)
+ return self.arr
+
+ def f(self):
+ self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
+
class groupby_multi_size(object):
goal_time = 0.2
@@ -1468,24 +1468,24 @@ class groupby_series_simple_cython(object):
def setup(self):
self.N = 100000
self.ngroups = 100
-
- def get_test_data(ngroups=100, n=self.N):
- self.unique_groups = range(self.ngroups)
- self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
- if (len(self.arr) < n):
- self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
- random.shuffle(self.arr)
- return self.arr
- self.df = DataFrame({'key1': get_test_data(ngroups=self.ngroups), 'key2': get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
-
- def f():
- self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
+ self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
self.simple_series = Series(np.random.randn(self.N))
self.key1 = self.df['key1']
def time_groupby_series_simple_cython(self):
self.df.groupby('key1').rank(pct=True)
+ def get_test_data(self, ngroups=100, n=100000):
+ self.unique_groups = range(self.ngroups)
+ self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object)
+ if (len(self.arr) < n):
+ self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
+ random.shuffle(self.arr)
+ return self.arr
+
+ def f(self):
+ self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
+
class groupby_simple_compress_timing(object):
goal_time = 0.2
@@ -1535,12 +1535,12 @@ def setup(self):
self.secid_max = int('F0000000', 16)
self.step = ((self.secid_max - self.secid_min) // (self.n_securities - 1))
self.security_ids = map((lambda x: hex(x)[2:10].upper()), range(self.secid_min, (self.secid_max + 1), self.step))
- self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], labels=[[i for i in xrange(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id'])
+ self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], labels=[[i for i in range(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id'])
self.n_data = len(self.data_index)
- self.columns = Index(['factor{}'.format(i) for i in xrange(1, (self.n_columns + 1))])
+ self.columns = Index(['factor{}'.format(i) for i in range(1, (self.n_columns + 1))])
self.data = DataFrame(np.random.randn(self.n_data, self.n_columns), index=self.data_index, columns=self.columns)
self.step = int((self.n_data * self.share_na))
- for column_index in xrange(self.n_columns):
+ for column_index in range(self.n_columns):
self.index = column_index
while (self.index < self.n_data):
self.data.set_value(self.data_index[self.index], self.columns[column_index], np.nan)
@@ -1644,12 +1644,12 @@ def setup(self):
self.secid_max = int('F0000000', 16)
self.step = ((self.secid_max - self.secid_min) // (self.n_securities - 1))
self.security_ids = map((lambda x: hex(x)[2:10].upper()), range(self.secid_min, (self.secid_max + 1), self.step))
- self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], labels=[[i for i in xrange(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id'])
+ self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], labels=[[i for i in range(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id'])
self.n_data = len(self.data_index)
- self.columns = Index(['factor{}'.format(i) for i in xrange(1, (self.n_columns + 1))])
+ self.columns = Index(['factor{}'.format(i) for i in range(1, (self.n_columns + 1))])
self.data = DataFrame(np.random.randn(self.n_data, self.n_columns), index=self.data_index, columns=self.columns)
self.step = int((self.n_data * self.share_na))
- for column_index in xrange(self.n_columns):
+ for column_index in range(self.n_columns):
self.index = column_index
while (self.index < self.n_data):
self.data.set_value(self.data_index[self.index], self.columns[column_index], np.nan)
@@ -1660,6 +1660,16 @@ def time_groupby_transform_ufunc(self):
self.data.groupby(level='date').transform(np.max)
+class series_value_counts_float64(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.s = Series(np.random.randint(0, 1000, size=100000)).astype(float)
+
+ def time_series_value_counts_float64(self):
+ self.s.value_counts()
+
+
class series_value_counts_int64(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/hdfstore_bench.py b/asv_bench/benchmarks/hdfstore_bench.py
index 9e36f735f8608..7638cc2a0f8df 100644
--- a/asv_bench/benchmarks/hdfstore_bench.py
+++ b/asv_bench/benchmarks/hdfstore_bench.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
import os
@@ -7,15 +7,9 @@ class query_store_table(object):
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.index = date_range('1/1/2000', periods=25000)
self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), }, index=self.index)
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
self.store.append('df12', self.df)
@@ -25,21 +19,21 @@ def time_query_store_table(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class query_store_table_wide(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.index = date_range('1/1/2000', periods=25000)
self.df = DataFrame(np.random.randn(25000, 100), index=self.index)
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
self.store.append('df11', self.df)
@@ -49,21 +43,21 @@ def time_query_store_table_wide(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class read_store(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.index = tm.makeStringIndex(25000)
self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), }, index=self.index)
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
self.store.put('df1', self.df)
@@ -73,21 +67,21 @@ def time_read_store(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class read_store_mixed(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.index = tm.makeStringIndex(25000)
self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), 'string1': (['foo'] * 25000), 'bool1': ([True] * 25000), 'int1': np.random.randint(0, 250000, size=25000), }, index=self.index)
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
self.store.put('df3', self.df)
@@ -97,21 +91,21 @@ def time_read_store_mixed(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class read_store_table(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.index = tm.makeStringIndex(25000)
self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), }, index=self.index)
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
self.store.append('df7', self.df)
@@ -121,22 +115,22 @@ def time_read_store_table(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class read_store_table_mixed(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 10000
self.index = tm.makeStringIndex(self.N)
self.df = DataFrame({'float1': randn(self.N), 'float2': randn(self.N), 'string1': (['foo'] * self.N), 'bool1': ([True] * self.N), 'int1': np.random.randint(0, self.N, size=self.N), }, index=self.index)
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
self.store.append('df5', self.df)
@@ -146,20 +140,20 @@ def time_read_store_table_mixed(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class read_store_table_panel(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
- self.p = Panel(randn(20, 1000, 25), items=[('Item%03d' % i) for i in xrange(20)], major_axis=date_range('1/1/2000', periods=1000), minor_axis=[('E%03d' % i) for i in xrange(25)])
- remove(self.f)
+ self.p = Panel(randn(20, 1000, 25), items=[('Item%03d' % i) for i in range(20)], major_axis=date_range('1/1/2000', periods=1000), minor_axis=[('E%03d' % i) for i in range(25)])
+ self.remove(self.f)
self.store = HDFStore(self.f)
self.store.append('p1', self.p)
@@ -169,20 +163,20 @@ def time_read_store_table_panel(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class read_store_table_wide(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.df = DataFrame(np.random.randn(25000, 100))
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
self.store.append('df9', self.df)
@@ -192,21 +186,21 @@ def time_read_store_table_wide(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class write_store(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.index = tm.makeStringIndex(25000)
self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), }, index=self.index)
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
def time_write_store(self):
@@ -215,21 +209,21 @@ def time_write_store(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class write_store_mixed(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.index = tm.makeStringIndex(25000)
self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), 'string1': (['foo'] * 25000), 'bool1': ([True] * 25000), 'int1': np.random.randint(0, 250000, size=25000), }, index=self.index)
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
def time_write_store_mixed(self):
@@ -238,21 +232,21 @@ def time_write_store_mixed(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class write_store_table(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.index = tm.makeStringIndex(25000)
self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), }, index=self.index)
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
def time_write_store_table(self):
@@ -261,20 +255,20 @@ def time_write_store_table(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class write_store_table_dc(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
- self.df = DataFrame(np.random.randn(10000, 10), columns=[('C%03d' % i) for i in xrange(10)])
- remove(self.f)
+ self.df = DataFrame(np.random.randn(10000, 10), columns=[('C%03d' % i) for i in range(10)])
+ self.remove(self.f)
self.store = HDFStore(self.f)
def time_write_store_table_dc(self):
@@ -283,21 +277,21 @@ def time_write_store_table_dc(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class write_store_table_mixed(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.index = tm.makeStringIndex(25000)
self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), 'string1': (['foo'] * 25000), 'bool1': ([True] * 25000), 'int1': np.random.randint(0, 25000, size=25000), }, index=self.index)
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
def time_write_store_table_mixed(self):
@@ -306,20 +300,20 @@ def time_write_store_table_mixed(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class write_store_table_panel(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
- self.p = Panel(randn(20, 1000, 25), items=[('Item%03d' % i) for i in xrange(20)], major_axis=date_range('1/1/2000', periods=1000), minor_axis=[('E%03d' % i) for i in xrange(25)])
- remove(self.f)
+ self.p = Panel(randn(20, 1000, 25), items=[('Item%03d' % i) for i in range(20)], major_axis=date_range('1/1/2000', periods=1000), minor_axis=[('E%03d' % i) for i in range(25)])
+ self.remove(self.f)
self.store = HDFStore(self.f)
def time_write_store_table_panel(self):
@@ -328,24 +322,30 @@ def time_write_store_table_panel(self):
def teardown(self):
self.store.close()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class write_store_table_wide(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.df = DataFrame(np.random.randn(25000, 100))
- remove(self.f)
+ self.remove(self.f)
self.store = HDFStore(self.f)
def time_write_store_table_wide(self):
self.store.append('df10', self.df)
def teardown(self):
- self.store.close()
\ No newline at end of file
+ self.store.close()
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
\ No newline at end of file
diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py
index 9c181c92195ea..8c65f09937df4 100644
--- a/asv_bench/benchmarks/index_object.py
+++ b/asv_bench/benchmarks/index_object.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
class datetime_index_intersection(object):
@@ -248,7 +248,7 @@ class multiindex_from_product(object):
goal_time = 0.2
def setup(self):
- self.iterables = [tm.makeStringIndex(10000), xrange(20)]
+ self.iterables = [tm.makeStringIndex(10000), range(20)]
def time_multiindex_from_product(self):
MultiIndex.from_product(self.iterables)
diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index e76a87ab881c9..32d80a7913234 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -1,5 +1,8 @@
-from pandas_vb_common import *
-import pandas.computation.expressions as expr
+from .pandas_vb_common import *
+try:
+ import pandas.computation.expressions as expr
+except:
+ expr = None
class dataframe_getitem_scalar(object):
@@ -121,6 +124,8 @@ class indexing_dataframe_boolean_no_ne(object):
goal_time = 0.2
def setup(self):
+ if (expr is None):
+ raise NotImplementedError
self.df = DataFrame(np.random.randn(50000, 100))
self.df2 = DataFrame(np.random.randn(50000, 100))
expr.set_use_numexpr(False)
@@ -160,6 +165,8 @@ class indexing_dataframe_boolean_st(object):
goal_time = 0.2
def setup(self):
+ if (expr is None):
+ raise NotImplementedError
self.df = DataFrame(np.random.randn(50000, 100))
self.df2 = DataFrame(np.random.randn(50000, 100))
expr.set_numexpr_threads(1)
@@ -421,6 +428,30 @@ def time_series_loc_slice(self):
self.s.loc[:800000]
+class series_take_dtindex(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.s = Series(np.random.rand(100000))
+ self.ts = Series(np.random.rand(100000), index=date_range('2011-01-01', freq='S', periods=100000))
+ self.indexer = ([True, False, True, True, False] * 20000)
+
+ def time_series_take_dtindex(self):
+ self.ts.take(self.indexer)
+
+
+class series_take_intindex(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.s = Series(np.random.rand(100000))
+ self.ts = Series(np.random.rand(100000), index=date_range('2011-01-01', freq='S', periods=100000))
+ self.indexer = ([True, False, True, True, False] * 20000)
+
+ def time_series_take_intindex(self):
+ self.s.take(self.indexer)
+
+
class series_xs_mi_ix(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py
index 2addc810a218f..3fceed087facb 100644
--- a/asv_bench/benchmarks/inference.py
+++ b/asv_bench/benchmarks/inference.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
import pandas as pd
diff --git a/asv_bench/benchmarks/io_bench.py b/asv_bench/benchmarks/io_bench.py
index 9eee932de8b7c..a171641502d3c 100644
--- a/asv_bench/benchmarks/io_bench.py
+++ b/asv_bench/benchmarks/io_bench.py
@@ -1,6 +1,9 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
from pandas import concat, Timestamp
-from StringIO import StringIO
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
class frame_to_csv(object):
@@ -41,20 +44,20 @@ class frame_to_csv_mixed(object):
goal_time = 0.2
def setup(self):
-
- def create_cols(name):
- return [('%s%03d' % (name, i)) for i in xrange(5)]
- self.df_float = DataFrame(np.random.randn(5000, 5), dtype='float64', columns=create_cols('float'))
- self.df_int = DataFrame(np.random.randn(5000, 5), dtype='int64', columns=create_cols('int'))
- self.df_bool = DataFrame(True, index=self.df_float.index, columns=create_cols('bool'))
- self.df_object = DataFrame('foo', index=self.df_float.index, columns=create_cols('object'))
- self.df_dt = DataFrame(Timestamp('20010101'), index=self.df_float.index, columns=create_cols('date'))
+ self.df_float = DataFrame(np.random.randn(5000, 5), dtype='float64', columns=self.create_cols('float'))
+ self.df_int = DataFrame(np.random.randn(5000, 5), dtype='int64', columns=self.create_cols('int'))
+ self.df_bool = DataFrame(True, index=self.df_float.index, columns=self.create_cols('bool'))
+ self.df_object = DataFrame('foo', index=self.df_float.index, columns=self.create_cols('object'))
+ self.df_dt = DataFrame(Timestamp('20010101'), index=self.df_float.index, columns=self.create_cols('date'))
self.df_float.ix[30:500, 1:3] = np.nan
self.df = concat([self.df_float, self.df_int, self.df_bool, self.df_object, self.df_dt], axis=1)
def time_frame_to_csv_mixed(self):
self.df.to_csv('__test__.csv')
+ def create_cols(self, name):
+ return [('%s%03d' % (name, i)) for i in range(5)]
+
class read_csv_infer_datetime_format_custom(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/io_sql.py b/asv_bench/benchmarks/io_sql.py
index e75e691b61c96..9a6b21f9e067a 100644
--- a/asv_bench/benchmarks/io_sql.py
+++ b/asv_bench/benchmarks/io_sql.py
@@ -1,7 +1,7 @@
-from pandas_vb_common import *
-from sqlalchemy import create_engine
-import sqlite3
import sqlalchemy
+from .pandas_vb_common import *
+import sqlite3
+from sqlalchemy import create_engine
class sql_datetime_read_and_parse_sqlalchemy(object):
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index 08ae439e8fd5d..1da0d37d4a8dd 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
class append_frame_single_homogenous(object):
@@ -322,38 +322,38 @@ class series_align_int64_index(object):
def setup(self):
self.n = 1000000
-
- def sample(values, k):
- self.sampler = np.random.permutation(len(values))
- return values.take(self.sampler[:k])
self.sz = 500000
self.rng = np.arange(0, 10000000000000, 10000000)
self.stamps = (np.datetime64(datetime.now()).view('i8') + self.rng)
- self.idx1 = np.sort(sample(self.stamps, self.sz))
- self.idx2 = np.sort(sample(self.stamps, self.sz))
+ self.idx1 = np.sort(self.sample(self.stamps, self.sz))
+ self.idx2 = np.sort(self.sample(self.stamps, self.sz))
self.ts1 = Series(np.random.randn(self.sz), self.idx1)
self.ts2 = Series(np.random.randn(self.sz), self.idx2)
def time_series_align_int64_index(self):
(self.ts1 + self.ts2)
+ def sample(self, values, k):
+ self.sampler = np.random.permutation(len(values))
+ return values.take(self.sampler[:k])
+
class series_align_left_monotonic(object):
goal_time = 0.2
def setup(self):
self.n = 1000000
-
- def sample(values, k):
- self.sampler = np.random.permutation(len(values))
- return values.take(self.sampler[:k])
self.sz = 500000
self.rng = np.arange(0, 10000000000000, 10000000)
self.stamps = (np.datetime64(datetime.now()).view('i8') + self.rng)
- self.idx1 = np.sort(sample(self.stamps, self.sz))
- self.idx2 = np.sort(sample(self.stamps, self.sz))
+ self.idx1 = np.sort(self.sample(self.stamps, self.sz))
+ self.idx2 = np.sort(self.sample(self.stamps, self.sz))
self.ts1 = Series(np.random.randn(self.sz), self.idx1)
self.ts2 = Series(np.random.randn(self.sz), self.idx2)
def time_series_align_left_monotonic(self):
- self.ts1.align(self.ts2, join='left')
\ No newline at end of file
+ self.ts1.align(self.ts2, join='left')
+
+ def sample(self, values, k):
+ self.sampler = np.random.permutation(len(values))
+ return values.take(self.sampler[:k])
\ No newline at end of file
diff --git a/asv_bench/benchmarks/miscellaneous.py b/asv_bench/benchmarks/miscellaneous.py
index b9c02c85fb096..fe610ef4cb376 100644
--- a/asv_bench/benchmarks/miscellaneous.py
+++ b/asv_bench/benchmarks/miscellaneous.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
from pandas.util.decorators import cache_readonly
diff --git a/asv_bench/benchmarks/packers.py b/asv_bench/benchmarks/packers.py
index 81fa7c2238d16..12e48295d8d05 100644
--- a/asv_bench/benchmarks/packers.py
+++ b/asv_bench/benchmarks/packers.py
@@ -1,9 +1,9 @@
+from .pandas_vb_common import *
from numpy.random import randint
import pandas as pd
from collections import OrderedDict
from pandas.compat import BytesIO
import sqlite3
-from pandas_vb_common import *
import os
from sqlalchemy import create_engine
import numpy as np
@@ -16,12 +16,6 @@ class packers_read_csv(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -31,24 +25,24 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df.to_csv(self.f)
def time_packers_read_csv(self):
pd.read_csv(self.f)
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_read_excel(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -58,7 +52,7 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.bio = BytesIO()
self.writer = pd.io.excel.ExcelWriter(self.bio, engine='xlsxwriter')
self.df[:2000].to_excel(self.writer)
@@ -68,18 +62,18 @@ def time_packers_read_excel(self):
self.bio.seek(0)
pd.read_excel(self.bio)
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_read_hdf_store(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -89,24 +83,24 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df2.to_hdf(self.f, 'df')
def time_packers_read_hdf_store(self):
pd.read_hdf(self.f, 'df')
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_read_hdf_table(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -116,24 +110,24 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df2.to_hdf(self.f, 'df', format='table')
def time_packers_read_hdf_table(self):
pd.read_hdf(self.f, 'df')
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_read_json(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -143,25 +137,25 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df.to_json(self.f, orient='split')
self.df.index = np.arange(self.N)
def time_packers_read_json(self):
pd.read_json(self.f, orient='split')
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_read_json_date_index(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -171,24 +165,24 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df.to_json(self.f, orient='split')
def time_packers_read_json_date_index(self):
pd.read_json(self.f, orient='split')
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_read_pack(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -198,24 +192,24 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df2.to_msgpack(self.f)
def time_packers_read_pack(self):
pd.read_msgpack(self.f)
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_read_pickle(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -225,24 +219,24 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df2.to_pickle(self.f)
def time_packers_read_pickle(self):
pd.read_pickle(self.f)
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_read_sql(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -252,25 +246,25 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.engine = create_engine('sqlite:///:memory:')
self.df2.to_sql('table', self.engine, if_exists='replace')
def time_packers_read_sql(self):
pd.read_sql_table('table', self.engine)
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_read_stata(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -280,24 +274,24 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df.to_stata(self.f, {'index': 'tc', })
def time_packers_read_stata(self):
pd.read_stata(self.f)
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_read_stata_with_validation(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -307,7 +301,7 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df['int8_'] = [randint(np.iinfo(np.int8).min, (np.iinfo(np.int8).max - 27)) for _ in range(self.N)]
self.df['int16_'] = [randint(np.iinfo(np.int16).min, (np.iinfo(np.int16).max - 27)) for _ in range(self.N)]
self.df['int32_'] = [randint(np.iinfo(np.int32).min, (np.iinfo(np.int32).max - 27)) for _ in range(self.N)]
@@ -317,18 +311,18 @@ def remove(f):
def time_packers_read_stata_with_validation(self):
pd.read_stata(self.f)
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_write_csv(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -338,13 +332,19 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
def time_packers_write_csv(self):
self.df.to_csv(self.f)
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_excel_openpyxl(object):
@@ -352,12 +352,6 @@ class packers_write_excel_openpyxl(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -367,7 +361,7 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.bio = BytesIO()
def time_packers_write_excel_openpyxl(self):
@@ -376,18 +370,18 @@ def time_packers_write_excel_openpyxl(self):
self.df[:2000].to_excel(self.writer)
self.writer.save()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_write_excel_xlsxwriter(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -397,7 +391,7 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.bio = BytesIO()
def time_packers_write_excel_xlsxwriter(self):
@@ -406,18 +400,18 @@ def time_packers_write_excel_xlsxwriter(self):
self.df[:2000].to_excel(self.writer)
self.writer.save()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_write_excel_xlwt(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -427,7 +421,7 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.bio = BytesIO()
def time_packers_write_excel_xlwt(self):
@@ -436,18 +430,18 @@ def time_packers_write_excel_xlwt(self):
self.df[:2000].to_excel(self.writer)
self.writer.save()
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_write_hdf_store(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -457,13 +451,19 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
def time_packers_write_hdf_store(self):
self.df2.to_hdf(self.f, 'df')
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_hdf_table(object):
@@ -471,12 +471,6 @@ class packers_write_hdf_table(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -486,13 +480,19 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
def time_packers_write_hdf_table(self):
self.df2.to_hdf(self.f, 'df', table=True)
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_json(object):
@@ -500,12 +500,6 @@ class packers_write_json(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -515,14 +509,20 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df.index = np.arange(self.N)
def time_packers_write_json(self):
self.df.to_json(self.f, orient='split')
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_json_T(object):
@@ -530,12 +530,6 @@ class packers_write_json_T(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -545,14 +539,20 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df.index = np.arange(self.N)
def time_packers_write_json_T(self):
self.df.to_json(self.f, orient='columns')
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_json_date_index(object):
@@ -560,12 +560,6 @@ class packers_write_json_date_index(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -575,13 +569,19 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
def time_packers_write_json_date_index(self):
self.df.to_json(self.f, orient='split')
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_json_mixed_delta_int_tstamp(object):
@@ -589,12 +589,6 @@ class packers_write_json_mixed_delta_int_tstamp(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -604,7 +598,7 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.cols = [(lambda i: ('{0}_timedelta'.format(i), [pd.Timedelta(('%d seconds' % randrange(1000000.0))) for _ in range(self.N)])), (lambda i: ('{0}_int'.format(i), randint(100000000.0, size=self.N))), (lambda i: ('{0}_timestamp'.format(i), [pd.Timestamp((1418842918083256000 + randrange(1000000000.0, 1e+18, 200))) for _ in range(self.N)]))]
self.df_mixed = DataFrame(OrderedDict([self.cols[(i % len(self.cols))](i) for i in range(self.C)]), index=self.index)
@@ -612,7 +606,13 @@ def time_packers_write_json_mixed_delta_int_tstamp(self):
self.df_mixed.to_json(self.f, orient='split')
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_json_mixed_float_int(object):
@@ -620,12 +620,6 @@ class packers_write_json_mixed_float_int(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -635,7 +629,7 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.cols = [(lambda i: ('{0}_float'.format(i), randn(self.N))), (lambda i: ('{0}_int'.format(i), randint(100000000.0, size=self.N)))]
self.df_mixed = DataFrame(OrderedDict([self.cols[(i % len(self.cols))](i) for i in range(self.C)]), index=self.index)
@@ -643,7 +637,13 @@ def time_packers_write_json_mixed_float_int(self):
self.df_mixed.to_json(self.f, orient='index')
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_json_mixed_float_int_T(object):
@@ -651,12 +651,6 @@ class packers_write_json_mixed_float_int_T(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -666,7 +660,7 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.cols = [(lambda i: ('{0}_float'.format(i), randn(self.N))), (lambda i: ('{0}_int'.format(i), randint(100000000.0, size=self.N)))]
self.df_mixed = DataFrame(OrderedDict([self.cols[(i % len(self.cols))](i) for i in range(self.C)]), index=self.index)
@@ -674,7 +668,13 @@ def time_packers_write_json_mixed_float_int_T(self):
self.df_mixed.to_json(self.f, orient='columns')
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_json_mixed_float_int_str(object):
@@ -682,12 +682,6 @@ class packers_write_json_mixed_float_int_str(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -697,7 +691,7 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.cols = [(lambda i: ('{0}_float'.format(i), randn(self.N))), (lambda i: ('{0}_int'.format(i), randint(100000000.0, size=self.N))), (lambda i: ('{0}_str'.format(i), [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]))]
self.df_mixed = DataFrame(OrderedDict([self.cols[(i % len(self.cols))](i) for i in range(self.C)]), index=self.index)
@@ -705,7 +699,13 @@ def time_packers_write_json_mixed_float_int_str(self):
self.df_mixed.to_json(self.f, orient='split')
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_pack(object):
@@ -713,12 +713,6 @@ class packers_write_pack(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -728,13 +722,19 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
def time_packers_write_pack(self):
self.df2.to_msgpack(self.f)
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_pickle(object):
@@ -742,12 +742,6 @@ class packers_write_pickle(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -757,13 +751,19 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
def time_packers_write_pickle(self):
self.df2.to_pickle(self.f)
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_sql(object):
@@ -771,12 +771,6 @@ class packers_write_sql(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -786,24 +780,24 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.engine = create_engine('sqlite:///:memory:')
def time_packers_write_sql(self):
self.df2.to_sql('table', self.engine, if_exists='replace')
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
+
class packers_write_stata(object):
goal_time = 0.2
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -813,14 +807,20 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df.to_stata(self.f, {'index': 'tc', })
def time_packers_write_stata(self):
self.df.to_stata(self.f, {'index': 'tc', })
def teardown(self):
- remove(self.f)
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
class packers_write_stata_with_validation(object):
@@ -828,12 +828,6 @@ class packers_write_stata_with_validation(object):
def setup(self):
self.f = '__test__.msg'
-
- def remove(f):
- try:
- os.remove(self.f)
- except:
- pass
self.N = 100000
self.C = 5
self.index = date_range('20000101', periods=self.N, freq='H')
@@ -843,7 +837,7 @@ def remove(f):
self.index = date_range('20000101', periods=self.N, freq='H')
self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index)
self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]
- remove(self.f)
+ self.remove(self.f)
self.df['int8_'] = [randint(np.iinfo(np.int8).min, (np.iinfo(np.int8).max - 27)) for _ in range(self.N)]
self.df['int16_'] = [randint(np.iinfo(np.int16).min, (np.iinfo(np.int16).max - 27)) for _ in range(self.N)]
self.df['int32_'] = [randint(np.iinfo(np.int32).min, (np.iinfo(np.int32).max - 27)) for _ in range(self.N)]
@@ -854,4 +848,10 @@ def time_packers_write_stata_with_validation(self):
self.df.to_stata(self.f, {'index': 'tc', })
def teardown(self):
- remove(self.f)
\ No newline at end of file
+ self.remove(self.f)
+
+ def remove(self, f):
+ try:
+ os.remove(self.f)
+ except:
+ pass
\ No newline at end of file
diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
deleted file mode 120000
index 6e2e449a4c00a..0000000000000
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ /dev/null
@@ -1 +0,0 @@
-../../vb_suite/pandas_vb_common.py
\ No newline at end of file
diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
new file mode 100644
index 0000000000000..a1326d63a112a
--- /dev/null
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -0,0 +1,30 @@
+from pandas import *
+import pandas as pd
+from datetime import timedelta
+from numpy.random import randn
+from numpy.random import randint
+from numpy.random import permutation
+import pandas.util.testing as tm
+import random
+import numpy as np
+try:
+ from pandas.compat import range
+except ImportError:
+ pass
+
+np.random.seed(1234)
+try:
+ import pandas._tseries as lib
+except:
+ import pandas.lib as lib
+
+try:
+ Panel = WidePanel
+except Exception:
+ pass
+
+# didn't add to namespace until later
+try:
+ from pandas.core.index import MultiIndex
+except ImportError:
+ pass
diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py
index c755cb122a0bf..0b0e73847aa96 100644
--- a/asv_bench/benchmarks/panel_ctor.py
+++ b/asv_bench/benchmarks/panel_ctor.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
class panel_from_dict_all_different_indexes(object):
@@ -8,7 +8,7 @@ def setup(self):
self.data_frames = {}
self.start = datetime(1990, 1, 1)
self.end = datetime(2012, 1, 1)
- for x in xrange(100):
+ for x in range(100):
self.end += timedelta(days=1)
self.dr = np.asarray(date_range(self.start, self.end))
self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr)
@@ -23,7 +23,7 @@ class panel_from_dict_equiv_indexes(object):
def setup(self):
self.data_frames = {}
- for x in xrange(100):
+ for x in range(100):
self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq=datetools.Day(1)))
self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr)
self.data_frames[x] = self.df
@@ -38,7 +38,7 @@ class panel_from_dict_same_index(object):
def setup(self):
self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq=datetools.Day(1)))
self.data_frames = {}
- for x in xrange(100):
+ for x in range(100):
self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr)
self.data_frames[x] = self.df
@@ -53,7 +53,7 @@ def setup(self):
self.data_frames = {}
self.start = datetime(1990, 1, 1)
self.end = datetime(2012, 1, 1)
- for x in xrange(100):
+ for x in range(100):
if (x == 50):
self.end += timedelta(days=1)
self.dr = np.asarray(date_range(self.start, self.end))
diff --git a/asv_bench/benchmarks/panel_methods.py b/asv_bench/benchmarks/panel_methods.py
index 4145b68dca997..90118eaf6e407 100644
--- a/asv_bench/benchmarks/panel_methods.py
+++ b/asv_bench/benchmarks/panel_methods.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
class panel_pct_change_items(object):
diff --git a/asv_bench/benchmarks/parser_vb.py b/asv_bench/benchmarks/parser_vb.py
index 46167dc2bb33c..18cd4de6cc9c5 100644
--- a/asv_bench/benchmarks/parser_vb.py
+++ b/asv_bench/benchmarks/parser_vb.py
@@ -1,7 +1,10 @@
-from cStringIO import StringIO
-from pandas_vb_common import *
+from .pandas_vb_common import *
import os
from pandas import read_csv, read_table
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
class read_csv_comment2(object):
diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py
index d1df1b429c656..f46082ac6f288 100644
--- a/asv_bench/benchmarks/plotting.py
+++ b/asv_bench/benchmarks/plotting.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
try:
from pandas import date_range
except ImportError:
diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py
index 03e654b4886cc..b1c039058ff8f 100644
--- a/asv_bench/benchmarks/reindex.py
+++ b/asv_bench/benchmarks/reindex.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
from random import shuffle
@@ -168,20 +168,20 @@ def setup(self):
self.ts3 = self.ts2.reindex(self.ts.index)
self.ts4 = self.ts3.astype('float32')
- def pad(source_series, target_index):
- try:
- source_series.reindex(target_index, method='pad')
- except:
- source_series.reindex(target_index, fillMethod='pad')
+ def time_reindex_daterange_backfill(self):
+ self.backfill(self.ts2, self.ts.index)
- def backfill(source_series, target_index):
- try:
- source_series.reindex(target_index, method='backfill')
- except:
- source_series.reindex(target_index, fillMethod='backfill')
+ def pad(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='pad')
+ except:
+ source_series.reindex(target_index, fillMethod='pad')
- def time_reindex_daterange_backfill(self):
- backfill(self.ts2, self.ts.index)
+ def backfill(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='backfill')
+ except:
+ source_series.reindex(target_index, fillMethod='backfill')
class reindex_daterange_pad(object):
@@ -194,20 +194,20 @@ def setup(self):
self.ts3 = self.ts2.reindex(self.ts.index)
self.ts4 = self.ts3.astype('float32')
- def pad(source_series, target_index):
- try:
- source_series.reindex(target_index, method='pad')
- except:
- source_series.reindex(target_index, fillMethod='pad')
+ def time_reindex_daterange_pad(self):
+ self.pad(self.ts2, self.ts.index)
- def backfill(source_series, target_index):
- try:
- source_series.reindex(target_index, method='backfill')
- except:
- source_series.reindex(target_index, fillMethod='backfill')
+ def pad(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='pad')
+ except:
+ source_series.reindex(target_index, fillMethod='pad')
- def time_reindex_daterange_pad(self):
- pad(self.ts2, self.ts.index)
+ def backfill(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='backfill')
+ except:
+ source_series.reindex(target_index, fillMethod='backfill')
class reindex_fillna_backfill(object):
@@ -220,21 +220,21 @@ def setup(self):
self.ts3 = self.ts2.reindex(self.ts.index)
self.ts4 = self.ts3.astype('float32')
- def pad(source_series, target_index):
- try:
- source_series.reindex(target_index, method='pad')
- except:
- source_series.reindex(target_index, fillMethod='pad')
-
- def backfill(source_series, target_index):
- try:
- source_series.reindex(target_index, method='backfill')
- except:
- source_series.reindex(target_index, fillMethod='backfill')
-
def time_reindex_fillna_backfill(self):
self.ts3.fillna(method='backfill')
+ def pad(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='pad')
+ except:
+ source_series.reindex(target_index, fillMethod='pad')
+
+ def backfill(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='backfill')
+ except:
+ source_series.reindex(target_index, fillMethod='backfill')
+
class reindex_fillna_backfill_float32(object):
goal_time = 0.2
@@ -246,21 +246,21 @@ def setup(self):
self.ts3 = self.ts2.reindex(self.ts.index)
self.ts4 = self.ts3.astype('float32')
- def pad(source_series, target_index):
- try:
- source_series.reindex(target_index, method='pad')
- except:
- source_series.reindex(target_index, fillMethod='pad')
-
- def backfill(source_series, target_index):
- try:
- source_series.reindex(target_index, method='backfill')
- except:
- source_series.reindex(target_index, fillMethod='backfill')
-
def time_reindex_fillna_backfill_float32(self):
self.ts4.fillna(method='backfill')
+ def pad(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='pad')
+ except:
+ source_series.reindex(target_index, fillMethod='pad')
+
+ def backfill(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='backfill')
+ except:
+ source_series.reindex(target_index, fillMethod='backfill')
+
class reindex_fillna_pad(object):
goal_time = 0.2
@@ -272,21 +272,21 @@ def setup(self):
self.ts3 = self.ts2.reindex(self.ts.index)
self.ts4 = self.ts3.astype('float32')
- def pad(source_series, target_index):
- try:
- source_series.reindex(target_index, method='pad')
- except:
- source_series.reindex(target_index, fillMethod='pad')
-
- def backfill(source_series, target_index):
- try:
- source_series.reindex(target_index, method='backfill')
- except:
- source_series.reindex(target_index, fillMethod='backfill')
-
def time_reindex_fillna_pad(self):
self.ts3.fillna(method='pad')
+ def pad(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='pad')
+ except:
+ source_series.reindex(target_index, fillMethod='pad')
+
+ def backfill(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='backfill')
+ except:
+ source_series.reindex(target_index, fillMethod='backfill')
+
class reindex_fillna_pad_float32(object):
goal_time = 0.2
@@ -298,21 +298,21 @@ def setup(self):
self.ts3 = self.ts2.reindex(self.ts.index)
self.ts4 = self.ts3.astype('float32')
- def pad(source_series, target_index):
- try:
- source_series.reindex(target_index, method='pad')
- except:
- source_series.reindex(target_index, fillMethod='pad')
-
- def backfill(source_series, target_index):
- try:
- source_series.reindex(target_index, method='backfill')
- except:
- source_series.reindex(target_index, fillMethod='backfill')
-
def time_reindex_fillna_pad_float32(self):
self.ts4.fillna(method='pad')
+ def pad(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='pad')
+ except:
+ source_series.reindex(target_index, fillMethod='pad')
+
+ def backfill(self, source_series, target_index):
+ try:
+ source_series.reindex(target_index, method='backfill')
+ except:
+ source_series.reindex(target_index, fillMethod='backfill')
+
class reindex_frame_level_align(object):
goal_time = 0.2
@@ -362,18 +362,18 @@ class series_align_irregular_string(object):
def setup(self):
self.n = 50000
self.indices = tm.makeStringIndex(self.n)
-
- def sample(values, k):
- self.sampler = np.arange(len(values))
- shuffle(self.sampler)
- return values.take(self.sampler[:k])
self.subsample_size = 40000
self.x = Series(np.random.randn(50000), self.indices)
- self.y = Series(np.random.randn(self.subsample_size), index=sample(self.indices, self.subsample_size))
+ self.y = Series(np.random.randn(self.subsample_size), index=self.sample(self.indices, self.subsample_size))
def time_series_align_irregular_string(self):
(self.x + self.y)
+ def sample(self, values, k):
+ self.sampler = np.arange(len(values))
+ shuffle(self.sampler)
+ return values.take(self.sampler[:k])
+
class series_drop_duplicates_int(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py
index 9b78c287c5ad4..e9f33ebfce0bd 100644
--- a/asv_bench/benchmarks/replace.py
+++ b/asv_bench/benchmarks/replace.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
from pandas.compat import range
from datetime import timedelta
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index b4081957af97b..604fa5092a231 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
from pandas.core.reshape import melt
@@ -22,19 +22,19 @@ class reshape_pivot_time_series(object):
def setup(self):
self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)])
self.df = DataFrame(np.random.randn(10000, 4), index=self.index)
-
- def unpivot(frame):
- (N, K) = frame.shape
- self.data = {'value': frame.values.ravel('F'), 'variable': np.asarray(frame.columns).repeat(N), 'date': np.tile(np.asarray(frame.index), K), }
- return DataFrame(self.data, columns=['date', 'variable', 'value'])
self.index = date_range('1/1/2000', periods=10000, freq='h')
self.df = DataFrame(randn(10000, 50), index=self.index, columns=range(50))
- self.pdf = unpivot(self.df)
+ self.pdf = self.unpivot(self.df)
self.f = (lambda : self.pdf.pivot('date', 'variable', 'value'))
def time_reshape_pivot_time_series(self):
self.f()
+ def unpivot(self, frame):
+ (N, K) = frame.shape
+ self.data = {'value': frame.values.ravel('F'), 'variable': np.asarray(frame.columns).repeat(N), 'date': np.tile(np.asarray(frame.index), K), }
+ return DataFrame(self.data, columns=['date', 'variable', 'value'])
+
class reshape_stack_simple(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index 9cd61c741dae1..d2167a8b6e9e1 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
class series_isin_int64(object):
diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py
index dbf35f5e40f55..d7ee58fc978ea 100644
--- a/asv_bench/benchmarks/sparse.py
+++ b/asv_bench/benchmarks/sparse.py
@@ -1,6 +1,6 @@
-from pandas_vb_common import *
-import scipy.sparse
+from .pandas_vb_common import *
import pandas.sparse.series
+import scipy.sparse
from pandas.core.sparse import SparseSeries, SparseDataFrame
from pandas.core.sparse import SparseDataFrame
diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py
index 98e2bbfce1a44..4125357455d2e 100644
--- a/asv_bench/benchmarks/stat_ops.py
+++ b/asv_bench/benchmarks/stat_ops.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
class stat_ops_frame_mean_float_axis_0(object):
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py
index 5adfbf4c2557d..e4f91b1b9c0c6 100644
--- a/asv_bench/benchmarks/strings.py
+++ b/asv_bench/benchmarks/strings.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
import string
import itertools as IT
import pandas.util.testing as testing
@@ -8,99 +8,99 @@ class strings_cat(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_cat(self):
self.many.str.cat(sep=',')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_center(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_center(self):
self.many.str.center(100)
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_contains_few(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_contains_few(self):
self.few.str.contains('matchthis')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_contains_few_noregex(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_contains_few_noregex(self):
self.few.str.contains('matchthis', regex=False)
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_contains_many(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_contains_many(self):
self.many.str.contains('matchthis')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_contains_many_noregex(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_contains_many_noregex(self):
self.many.str.contains('matchthis', regex=False)
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_count(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_count(self):
self.many.str.count('matchthis')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_encode_decode(object):
goal_time = 0.2
@@ -116,278 +116,278 @@ class strings_endswith(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_endswith(self):
self.many.str.endswith('matchthis')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_extract(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_extract(self):
self.many.str.extract('(\\w*)matchthis(\\w*)')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_findall(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_findall(self):
self.many.str.findall('[A-Z]+')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_get(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_get(self):
self.many.str.get(0)
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_get_dummies(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
- self.s = make_series(string.uppercase, strlen=10, size=10000).str.join('|')
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
+ self.s = self.make_series(string.ascii_uppercase, strlen=10, size=10000).str.join('|')
def time_strings_get_dummies(self):
self.s.str.get_dummies('|')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_join_split(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_join_split(self):
self.many.str.join('--').str.split('--')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_join_split_expand(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_join_split_expand(self):
self.many.str.join('--').str.split('--', expand=True)
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_len(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_len(self):
self.many.str.len()
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_lower(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_lower(self):
self.many.str.lower()
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_lstrip(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_lstrip(self):
self.many.str.lstrip('matchthis')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_match(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_match(self):
self.many.str.match('mat..this')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_pad(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_pad(self):
self.many.str.pad(100, side='both')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_repeat(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_repeat(self):
self.many.str.repeat(list(IT.islice(IT.cycle(range(1, 4)), len(self.many))))
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_replace(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_replace(self):
self.many.str.replace('(matchthis)', '\x01\x01')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_rstrip(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_rstrip(self):
self.many.str.rstrip('matchthis')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_slice(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_slice(self):
self.many.str.slice(5, 15, 2)
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_startswith(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_startswith(self):
self.many.str.startswith('matchthis')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_strip(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_strip(self):
self.many.str.strip('matchthis')
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_title(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_title(self):
self.many.str.title()
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
+
class strings_upper(object):
goal_time = 0.2
def setup(self):
-
- def make_series(letters, strlen, size):
- return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen)))
- self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000)
- self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000)
+ self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
+ self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
def time_strings_upper(self):
- self.many.str.upper()
\ No newline at end of file
+ self.many.str.upper()
+
+ def make_series(self, letters, strlen, size):
+ return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
\ No newline at end of file
diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py
index 36a0f98e3f5ef..2f252a4d3e1dc 100644
--- a/asv_bench/benchmarks/timedelta.py
+++ b/asv_bench/benchmarks/timedelta.py
@@ -1,4 +1,4 @@
-from pandas_vb_common import *
+from .pandas_vb_common import *
from pandas import to_timedelta
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 266c198de1455..db0c526f25c7b 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -1,10 +1,13 @@
from pandas.tseries.converter import DatetimeConverter
+from .pandas_vb_common import *
import pandas as pd
from datetime import timedelta
import datetime as dt
-from pandas_vb_common import *
+try:
+ import pandas.tseries.holiday
+except ImportError:
+ pass
from pandas.tseries.frequencies import infer_freq
-import pandas.tseries.holiday
import numpy as np
@@ -631,6 +634,63 @@ def time_timeseries_custom_bmonthend_incr_n(self):
(self.date + (10 * self.cme))
+class timeseries_datetimeindex_offset_delta(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 100000
+ self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
+ if hasattr(Series, 'convert'):
+ Series.resample = Series.convert
+ self.ts = Series(np.random.randn(self.N), index=self.rng)
+ self.N = 100000
+ self.idx1 = date_range(start='20140101', freq='T', periods=self.N)
+ self.delta_offset = pd.offsets.Day()
+ self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
+ self.slow_offset = pd.offsets.BusinessDay()
+
+ def time_timeseries_datetimeindex_offset_delta(self):
+ (self.idx1 + self.delta_offset)
+
+
+class timeseries_datetimeindex_offset_fast(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 100000
+ self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
+ if hasattr(Series, 'convert'):
+ Series.resample = Series.convert
+ self.ts = Series(np.random.randn(self.N), index=self.rng)
+ self.N = 100000
+ self.idx1 = date_range(start='20140101', freq='T', periods=self.N)
+ self.delta_offset = pd.offsets.Day()
+ self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
+ self.slow_offset = pd.offsets.BusinessDay()
+
+ def time_timeseries_datetimeindex_offset_fast(self):
+ (self.idx1 + self.fast_offset)
+
+
+class timeseries_datetimeindex_offset_slow(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 100000
+ self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
+ if hasattr(Series, 'convert'):
+ Series.resample = Series.convert
+ self.ts = Series(np.random.randn(self.N), index=self.rng)
+ self.N = 100000
+ self.idx1 = date_range(start='20140101', freq='T', periods=self.N)
+ self.delta_offset = pd.offsets.Day()
+ self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
+ self.slow_offset = pd.offsets.BusinessDay()
+
+ def time_timeseries_datetimeindex_offset_slow(self):
+ (self.idx1 + self.slow_offset)
+
+
class timeseries_day_apply(object):
goal_time = 0.2
@@ -723,15 +783,15 @@ def setup(self):
self.idx1 = date_range(start='20140101', freq='T', periods=self.N)
self.idx2 = period_range(start='20140101', freq='T', periods=self.N)
- def iter_n(iterable, n=None):
- self.i = 0
- for _ in iterable:
- self.i += 1
- if ((n is not None) and (self.i > n)):
- break
-
def time_timeseries_iter_datetimeindex(self):
- iter_n(self.idx1)
+ self.iter_n(self.idx1)
+
+ def iter_n(self, iterable, n=None):
+ self.i = 0
+ for _ in iterable:
+ self.i += 1
+ if ((n is not None) and (self.i > n)):
+ break
class timeseries_iter_datetimeindex_preexit(object):
@@ -748,15 +808,15 @@ def setup(self):
self.idx1 = date_range(start='20140101', freq='T', periods=self.N)
self.idx2 = period_range(start='20140101', freq='T', periods=self.N)
- def iter_n(iterable, n=None):
- self.i = 0
- for _ in iterable:
- self.i += 1
- if ((n is not None) and (self.i > n)):
- break
-
def time_timeseries_iter_datetimeindex_preexit(self):
- iter_n(self.idx1, self.M)
+ self.iter_n(self.idx1, self.M)
+
+ def iter_n(self, iterable, n=None):
+ self.i = 0
+ for _ in iterable:
+ self.i += 1
+ if ((n is not None) and (self.i > n)):
+ break
class timeseries_iter_periodindex(object):
@@ -773,15 +833,15 @@ def setup(self):
self.idx1 = date_range(start='20140101', freq='T', periods=self.N)
self.idx2 = period_range(start='20140101', freq='T', periods=self.N)
- def iter_n(iterable, n=None):
- self.i = 0
- for _ in iterable:
- self.i += 1
- if ((n is not None) and (self.i > n)):
- break
-
def time_timeseries_iter_periodindex(self):
- iter_n(self.idx2)
+ self.iter_n(self.idx2)
+
+ def iter_n(self, iterable, n=None):
+ self.i = 0
+ for _ in iterable:
+ self.i += 1
+ if ((n is not None) and (self.i > n)):
+ break
class timeseries_iter_periodindex_preexit(object):
@@ -798,15 +858,15 @@ def setup(self):
self.idx1 = date_range(start='20140101', freq='T', periods=self.N)
self.idx2 = period_range(start='20140101', freq='T', periods=self.N)
- def iter_n(iterable, n=None):
- self.i = 0
- for _ in iterable:
- self.i += 1
- if ((n is not None) and (self.i > n)):
- break
-
def time_timeseries_iter_periodindex_preexit(self):
- iter_n(self.idx2, self.M)
+ self.iter_n(self.idx2, self.M)
+
+ def iter_n(self, iterable, n=None):
+ self.i = 0
+ for _ in iterable:
+ self.i += 1
+ if ((n is not None) and (self.i > n)):
+ break
class timeseries_large_lookup_value(object):
@@ -859,6 +919,63 @@ def time_timeseries_resample_datetime64(self):
self.ts.resample('1S', how='last')
+class timeseries_series_offset_delta(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 100000
+ self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
+ if hasattr(Series, 'convert'):
+ Series.resample = Series.convert
+ self.ts = Series(np.random.randn(self.N), index=self.rng)
+ self.N = 100000
+ self.s = Series(date_range(start='20140101', freq='T', periods=self.N))
+ self.delta_offset = pd.offsets.Day()
+ self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
+ self.slow_offset = pd.offsets.BusinessDay()
+
+ def time_timeseries_series_offset_delta(self):
+ (self.s + self.delta_offset)
+
+
+class timeseries_series_offset_fast(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 100000
+ self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
+ if hasattr(Series, 'convert'):
+ Series.resample = Series.convert
+ self.ts = Series(np.random.randn(self.N), index=self.rng)
+ self.N = 100000
+ self.s = Series(date_range(start='20140101', freq='T', periods=self.N))
+ self.delta_offset = pd.offsets.Day()
+ self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
+ self.slow_offset = pd.offsets.BusinessDay()
+
+ def time_timeseries_series_offset_fast(self):
+ (self.s + self.fast_offset)
+
+
+class timeseries_series_offset_slow(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 100000
+ self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
+ if hasattr(Series, 'convert'):
+ Series.resample = Series.convert
+ self.ts = Series(np.random.randn(self.N), index=self.rng)
+ self.N = 100000
+ self.s = Series(date_range(start='20140101', freq='T', periods=self.N))
+ self.delta_offset = pd.offsets.Day()
+ self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
+ self.slow_offset = pd.offsets.BusinessDay()
+
+ def time_timeseries_series_offset_slow(self):
+ (self.s + self.slow_offset)
+
+
class timeseries_slice_minutely(object):
goal_time = 0.2
diff --git a/asv_bench/vbench_to_asv.py b/asv_bench/vbench_to_asv.py
index b3980ffed1a57..c3041ec2b1ba1 100644
--- a/asv_bench/vbench_to_asv.py
+++ b/asv_bench/vbench_to_asv.py
@@ -43,7 +43,29 @@ def __init__(self):
def visit_ClassDef(self, node):
self.transforms = {}
self.in_class_define = True
+
+ functions_to_promote = []
+ setup_func = None
+
+ for class_func in ast.iter_child_nodes(node):
+ if isinstance(class_func, ast.FunctionDef):
+ if class_func.name == 'setup':
+ setup_func = class_func
+ for anon_func in ast.iter_child_nodes(class_func):
+ if isinstance(anon_func, ast.FunctionDef):
+ functions_to_promote.append(anon_func)
+
+ if setup_func:
+ for func in functions_to_promote:
+ setup_func.body.remove(func)
+ func.args.args.insert(0, ast.Name(id='self', ctx=ast.Load()))
+ node.body.append(func)
+ self.transforms[func.name] = 'self.' + func.name
+
+ ast.fix_missing_locations(node)
+
self.generic_visit(node)
+
return node
def visit_TryExcept(self, node):
@@ -81,18 +103,8 @@ def visit_FunctionDef(self, node):
"""Delete functions that are empty due to imports being moved"""
self.in_class_define = False
- if self.in_setup:
- node.col_offset -= 4
- ast.increment_lineno(node, -1)
-
- if node.name == 'setup':
- self.in_setup = True
-
self.generic_visit(node)
- if node.name == 'setup':
- self.in_setup = False
-
if node.body:
return node
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 4ec2258df56f2..2c9b6a0a889f4 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -512,9 +512,49 @@ entire suite. This is done using one of the following constructs:
Running the performance test suite
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Performance matters and it is worth ensuring that your code has not introduced
+performance regressions. *pandas* is in the process of migrating to the
+`asv library <https://github.com/spacetelescope/asv>`__
+to enable easy monitoring of the performance of critical *pandas* operations.
+These benchmarks are all found in the ``pandas/asv_bench`` directory. *asv*
+supports both python2 and python3.
+
+.. note::
+
+ The *asv* benchmark suite was translated from the previous framework, vbench,
+ so many stylistic issues are likely a result of automated transformation of the
+ code.
+
+To install asv::
+
+ pip install git+https://github.com/spacetelescope/asv
+
+If you need to run a benchmark, change your directory to asv_bench/ and run
+the following if you have been developing on master::
+
+ asv continuous master
+
+Otherwise, if you are working on another branch, either of the following can be used::
+
+ asv continuous master HEAD
+ asv continuous master your_branch
+
+This will checkout the master revision and run the suite on both master and
+your commit. Running the full test suite can take up to one hour and use up
+to 3GB of RAM. Usually it is sufficient to paste a subset of the results in
+to the Pull Request to show that the committed changes do not cause unexpected
+performance regressions.
+
+You can run specific benchmarks using the *-b* flag which takes a regular expression.
+
+Information on how to write a benchmark can be found in the
+`asv documentation <http://asv.readthedocs.org/en/latest/writing_benchmarks.html>`__.
+
+Running the vbench performance test suite (phasing out)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Performance matters and it is worth considering that your code has not introduced
-performance regressions. Currently *pandas* uses the `vbench library <https://github.com/pydata/vbench>`__
+performance regressions. Historically, *pandas* used `vbench library <https://github.com/pydata/vbench>`__
to enable easy monitoring of the performance of critical *pandas* operations.
These benchmarks are all found in the ``pandas/vb_suite`` directory. vbench
currently only works on python2.
@@ -530,7 +570,7 @@ using pip. If you need to run a benchmark, change your directory to the *pandas
This will checkout the master revision and run the suite on both master and
your commit. Running the full test suite can take up to one hour and use up
-to 3GB of RAM. Usually it is sufficient to past a subset of the results in
+to 3GB of RAM. Usually it is sufficient to paste a subset of the results in
to the Pull Request to show that the committed changes do not cause unexpected
performance regressions.
diff --git a/vb_suite/attrs_caching.py b/vb_suite/attrs_caching.py
index e196546e632fe..a7e3ed7094ed6 100644
--- a/vb_suite/attrs_caching.py
+++ b/vb_suite/attrs_caching.py
@@ -1,6 +1,6 @@
from vbench.benchmark import Benchmark
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
diff --git a/vb_suite/binary_ops.py b/vb_suite/binary_ops.py
index cd8d1ad93b6e1..4c74688ce660e 100644
--- a/vb_suite/binary_ops.py
+++ b/vb_suite/binary_ops.py
@@ -1,7 +1,7 @@
from vbench.benchmark import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
SECTION = 'Binary ops'
diff --git a/vb_suite/categoricals.py b/vb_suite/categoricals.py
index cb33f1bb6c0b1..a08d479df20cb 100644
--- a/vb_suite/categoricals.py
+++ b/vb_suite/categoricals.py
@@ -1,7 +1,7 @@
from vbench.benchmark import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
diff --git a/vb_suite/ctors.py b/vb_suite/ctors.py
index 6af8e65b8f57d..8123322383f0a 100644
--- a/vb_suite/ctors.py
+++ b/vb_suite/ctors.py
@@ -1,7 +1,7 @@
from vbench.benchmark import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
diff --git a/vb_suite/eval.py b/vb_suite/eval.py
index a350cdc54cd17..bf80aad956184 100644
--- a/vb_suite/eval.py
+++ b/vb_suite/eval.py
@@ -1,7 +1,7 @@
from vbench.benchmark import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
import pandas as pd
df = DataFrame(np.random.randn(20000, 100))
df2 = DataFrame(np.random.randn(20000, 100))
@@ -112,7 +112,7 @@
start_date=datetime(2013, 7, 26))
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
setup = common_setup + """
diff --git a/vb_suite/frame_ctor.py b/vb_suite/frame_ctor.py
index 8ad63fc556c2e..0d57da7b88d3b 100644
--- a/vb_suite/frame_ctor.py
+++ b/vb_suite/frame_ctor.py
@@ -5,7 +5,7 @@
except:
import pandas.core.datetools as offsets
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
try:
from pandas.tseries.offsets import *
except:
@@ -40,7 +40,7 @@
# nested dict, integer indexes, regression described in #621
setup = common_setup + """
-data = dict((i,dict((j,float(j)) for j in xrange(100))) for i in xrange(2000))
+data = dict((i,dict((j,float(j)) for j in range(100))) for i in xrange(2000))
"""
frame_ctor_nested_dict_int64 = Benchmark("DataFrame(data)", setup)
diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py
index ce5109efe8f6d..46343e9c607fd 100644
--- a/vb_suite/frame_methods.py
+++ b/vb_suite/frame_methods.py
@@ -1,7 +1,7 @@
from vbench.api import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
@@ -98,11 +98,11 @@ def g():
pass
def h():
- for i in xrange(10000):
+ for i in range(10000):
df2['A']
def j():
- for i in xrange(10000):
+ for i in range(10000):
df3[0]
"""
@@ -126,8 +126,8 @@ def j():
setup = common_setup + """
idx = date_range('1/1/2000', periods=100000, freq='D')
df = DataFrame(randn(100000, 1),columns=['A'],index=idx)
-def f(x):
- x = x.copy()
+def f(df):
+ x = df.copy()
x['date'] = x.index
"""
@@ -494,7 +494,7 @@ def test_unequal(name):
setup = common_setup + """
def get_data(n=100000):
- return ((x, x*20, x*100) for x in xrange(n))
+ return ((x, x*20, x*100) for x in range(n))
"""
frame_from_records_generator = Benchmark('df = DataFrame.from_records(get_data())',
diff --git a/vb_suite/gil.py b/vb_suite/gil.py
index d5aec7c3e2917..df2bd2dcd8db4 100644
--- a/vb_suite/gil.py
+++ b/vb_suite/gil.py
@@ -1,11 +1,20 @@
from vbench.api import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
basic = common_setup + """
-from pandas.util.testing import test_parallel
+try:
+ from pandas.util.testing import test_parallel
+ have_real_test_parallel = True
+except ImportError:
+ have_real_test_parallel = False
+ def test_parallel(num_threads=1):
+ def wrapper(fname):
+ return fname
+
+ return wrapper
N = 1000000
ngroups = 1000
@@ -13,6 +22,9 @@
df = DataFrame({'key' : np.random.randint(0,ngroups,size=N),
'data' : np.random.randn(N) })
+
+if not have_real_test_parallel:
+ raise NotImplementedError
"""
setup = basic + """
diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py
index 3e3b0241545e5..bc21372225322 100644
--- a/vb_suite/groupby.py
+++ b/vb_suite/groupby.py
@@ -1,14 +1,14 @@
from vbench.api import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
setup = common_setup + """
N = 100000
ngroups = 100
-def get_test_data(ngroups=100, n=N):
+def get_test_data(ngroups=100, n=100000):
unique_groups = range(ngroups)
arr = np.asarray(np.tile(unique_groups, n / ngroups), dtype=object)
@@ -429,16 +429,16 @@ def f(g):
security_ids = map(lambda x: hex(x)[2:10].upper(), range(secid_min, secid_max + 1, step))
data_index = MultiIndex(levels=[dates.values, security_ids],
- labels=[[i for i in xrange(n_dates) for _ in xrange(n_securities)], range(n_securities) * n_dates],
+ labels=[[i for i in range(n_dates) for _ in xrange(n_securities)], range(n_securities) * n_dates],
names=['date', 'security_id'])
n_data = len(data_index)
-columns = Index(['factor{}'.format(i) for i in xrange(1, n_columns + 1)])
+columns = Index(['factor{}'.format(i) for i in range(1, n_columns + 1)])
data = DataFrame(np.random.randn(n_data, n_columns), index=data_index, columns=columns)
step = int(n_data * share_na)
-for column_index in xrange(n_columns):
+for column_index in range(n_columns):
index = column_index
while index < n_data:
data.set_value(data_index[index], columns[column_index], np.nan)
diff --git a/vb_suite/hdfstore_bench.py b/vb_suite/hdfstore_bench.py
index a822ad1c614be..393fd4cc77e66 100644
--- a/vb_suite/hdfstore_bench.py
+++ b/vb_suite/hdfstore_bench.py
@@ -3,7 +3,7 @@
start_date = datetime(2012, 7, 1)
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
import os
f = '__test__.h5'
@@ -234,8 +234,8 @@ def remove(f):
# select from a panel table
setup13 = common_setup + """
-p = Panel(randn(20, 1000, 25), items= [ 'Item%03d' % i for i in xrange(20) ],
- major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%03d' % i for i in xrange(25) ])
+p = Panel(randn(20, 1000, 25), items= [ 'Item%03d' % i for i in range(20) ],
+ major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%03d' % i for i in range(25) ])
remove(f)
store = HDFStore(f)
@@ -251,8 +251,8 @@ def remove(f):
# write to a panel table
setup14 = common_setup + """
-p = Panel(randn(20, 1000, 25), items= [ 'Item%03d' % i for i in xrange(20) ],
- major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%03d' % i for i in xrange(25) ])
+p = Panel(randn(20, 1000, 25), items= [ 'Item%03d' % i for i in range(20) ],
+ major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%03d' % i for i in range(25) ])
remove(f)
store = HDFStore(f)
@@ -266,7 +266,7 @@ def remove(f):
# write to a table (data_columns)
setup15 = common_setup + """
-df = DataFrame(np.random.randn(10000,10),columns = [ 'C%03d' % i for i in xrange(10) ])
+df = DataFrame(np.random.randn(10000,10),columns = [ 'C%03d' % i for i in range(10) ])
remove(f)
store = HDFStore(f)
diff --git a/vb_suite/index_object.py b/vb_suite/index_object.py
index 768eb2658af8f..2ab2bc15f3853 100644
--- a/vb_suite/index_object.py
+++ b/vb_suite/index_object.py
@@ -4,7 +4,7 @@
SECTION = "Index / MultiIndex objects"
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
@@ -111,7 +111,7 @@
#
setup = common_setup + """
-iterables = [tm.makeStringIndex(10000), xrange(20)]
+iterables = [tm.makeStringIndex(10000), range(20)]
"""
multiindex_from_product = Benchmark('MultiIndex.from_product(iterables)',
diff --git a/vb_suite/indexing.py b/vb_suite/indexing.py
index f2236c48fb002..3d95d52dccd71 100644
--- a/vb_suite/indexing.py
+++ b/vb_suite/indexing.py
@@ -3,7 +3,7 @@
SECTION = 'Indexing and scalar value access'
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
@@ -140,7 +140,13 @@
start_date=datetime(2012, 1, 1))
setup = common_setup + """
-import pandas.computation.expressions as expr
+try:
+ import pandas.computation.expressions as expr
+except:
+ expr = None
+
+if expr is None:
+ raise NotImplementedError
df = DataFrame(np.random.randn(50000, 100))
df2 = DataFrame(np.random.randn(50000, 100))
expr.set_numexpr_threads(1)
@@ -152,7 +158,13 @@
setup = common_setup + """
-import pandas.computation.expressions as expr
+try:
+ import pandas.computation.expressions as expr
+except:
+ expr = None
+
+if expr is None:
+ raise NotImplementedError
df = DataFrame(np.random.randn(50000, 100))
df2 = DataFrame(np.random.randn(50000, 100))
expr.set_use_numexpr(False)
diff --git a/vb_suite/inference.py b/vb_suite/inference.py
index 8855f7e654bb1..aaa51aa5163ce 100644
--- a/vb_suite/inference.py
+++ b/vb_suite/inference.py
@@ -4,7 +4,7 @@
# from GH 7332
-setup = """from pandas_vb_common import *
+setup = """from .pandas_vb_common import *
import pandas as pd
N = 500000
df_int64 = DataFrame(dict(A = np.arange(N,dtype='int64'), B = np.arange(N,dtype='int64')))
diff --git a/vb_suite/io_bench.py b/vb_suite/io_bench.py
index 483d61387898d..af5f6076515cc 100644
--- a/vb_suite/io_bench.py
+++ b/vb_suite/io_bench.py
@@ -1,8 +1,8 @@
from vbench.api import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
-from StringIO import StringIO
+common_setup = """from .pandas_vb_common import *
+from io import StringIO
"""
#----------------------------------------------------------------------
@@ -77,7 +77,7 @@
from pandas import concat, Timestamp
def create_cols(name):
- return [ "%s%03d" % (name,i) for i in xrange(5) ]
+ return [ "%s%03d" % (name,i) for i in range(5) ]
df_float = DataFrame(np.random.randn(5000, 5),dtype='float64',columns=create_cols('float'))
df_int = DataFrame(np.random.randn(5000, 5),dtype='int64',columns=create_cols('int'))
df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool'))
diff --git a/vb_suite/io_sql.py b/vb_suite/io_sql.py
index 7f580165939bb..ba8367e7e356b 100644
--- a/vb_suite/io_sql.py
+++ b/vb_suite/io_sql.py
@@ -1,7 +1,7 @@
from vbench.api import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
import sqlite3
import sqlalchemy
from sqlalchemy import create_engine
diff --git a/vb_suite/join_merge.py b/vb_suite/join_merge.py
index 244c6abe71b05..238a129552e90 100644
--- a/vb_suite/join_merge.py
+++ b/vb_suite/join_merge.py
@@ -1,7 +1,7 @@
from vbench.benchmark import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
setup = common_setup + """
diff --git a/vb_suite/miscellaneous.py b/vb_suite/miscellaneous.py
index 27efadc7acfe0..da2c736e79ea7 100644
--- a/vb_suite/miscellaneous.py
+++ b/vb_suite/miscellaneous.py
@@ -1,7 +1,7 @@
from vbench.benchmark import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
diff --git a/vb_suite/packers.py b/vb_suite/packers.py
index 60738a62bd287..69ec10822b392 100644
--- a/vb_suite/packers.py
+++ b/vb_suite/packers.py
@@ -3,7 +3,7 @@
start_date = datetime(2013, 5, 1)
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
import os
import pandas as pd
from pandas.core import common as com
diff --git a/vb_suite/pandas_vb_common.py b/vb_suite/pandas_vb_common.py
index 128e262d45d66..a1326d63a112a 100644
--- a/vb_suite/pandas_vb_common.py
+++ b/vb_suite/pandas_vb_common.py
@@ -7,6 +7,10 @@
import pandas.util.testing as tm
import random
import numpy as np
+try:
+ from pandas.compat import range
+except ImportError:
+ pass
np.random.seed(1234)
try:
diff --git a/vb_suite/panel_ctor.py b/vb_suite/panel_ctor.py
index b6637bb1e61ec..9f497e7357a61 100644
--- a/vb_suite/panel_ctor.py
+++ b/vb_suite/panel_ctor.py
@@ -1,7 +1,7 @@
from vbench.benchmark import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
@@ -14,7 +14,7 @@
dr = np.asarray(DatetimeIndex(start=datetime(1990,1,1), end=datetime(2012,1,1),
freq=datetools.Day(1)))
data_frames = {}
-for x in xrange(100):
+for x in range(100):
df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr),
"c": [2]*len(dr)}, index=dr)
data_frames[x] = df
@@ -27,7 +27,7 @@
setup_equiv_indexes = common_setup + """
data_frames = {}
-for x in xrange(100):
+for x in range(100):
dr = np.asarray(DatetimeIndex(start=datetime(1990,1,1), end=datetime(2012,1,1),
freq=datetools.Day(1)))
df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr),
@@ -44,7 +44,7 @@
data_frames = {}
start = datetime(1990,1,1)
end = datetime(2012,1,1)
-for x in xrange(100):
+for x in range(100):
end += timedelta(days=1)
dr = np.asarray(date_range(start, end))
df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr),
@@ -61,7 +61,7 @@
data_frames = {}
start = datetime(1990,1,1)
end = datetime(2012,1,1)
-for x in xrange(100):
+for x in range(100):
if x == 50:
end += timedelta(days=1)
dr = np.asarray(date_range(start, end))
diff --git a/vb_suite/panel_methods.py b/vb_suite/panel_methods.py
index 5e88671a23707..28586422a66e3 100644
--- a/vb_suite/panel_methods.py
+++ b/vb_suite/panel_methods.py
@@ -1,7 +1,7 @@
from vbench.api import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
diff --git a/vb_suite/parser_vb.py b/vb_suite/parser_vb.py
index 96da3fac2de5e..bb9ccbdb5e854 100644
--- a/vb_suite/parser_vb.py
+++ b/vb_suite/parser_vb.py
@@ -1,7 +1,7 @@
from vbench.api import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
from pandas import read_csv, read_table
"""
@@ -44,7 +44,11 @@
start_date=datetime(2011, 11, 1))
setup = common_setup + """
-from cStringIO import StringIO
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
import os
N = 10000
K = 8
@@ -63,7 +67,11 @@
read_table_multiple_date = Benchmark(cmd, setup, start_date=sdate)
setup = common_setup + """
-from cStringIO import StringIO
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
import os
N = 10000
K = 8
@@ -81,7 +89,11 @@
read_table_multiple_date_baseline = Benchmark(cmd, setup, start_date=sdate)
setup = common_setup + """
-from cStringIO import StringIO
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
data = '''\
0.1213700904466425978256438611,0.0525708283766902484401839501,0.4174092731488769913994474336
0.4096341697147408700274695547,0.1587830198973579909349496119,0.1292545832485494372576795285
diff --git a/vb_suite/plotting.py b/vb_suite/plotting.py
index 88d272e7be4b3..79e81e9eea8f4 100644
--- a/vb_suite/plotting.py
+++ b/vb_suite/plotting.py
@@ -1,7 +1,7 @@
from vbench.benchmark import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
try:
from pandas import date_range
diff --git a/vb_suite/reindex.py b/vb_suite/reindex.py
index 07f0e0f7e1bff..443eb43835745 100644
--- a/vb_suite/reindex.py
+++ b/vb_suite/reindex.py
@@ -1,7 +1,7 @@
from vbench.benchmark import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
diff --git a/vb_suite/replace.py b/vb_suite/replace.py
index 23d41e7c8e632..9326aa5becca9 100644
--- a/vb_suite/replace.py
+++ b/vb_suite/replace.py
@@ -1,7 +1,7 @@
from vbench.api import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
from datetime import timedelta
N = 1000000
@@ -15,7 +15,7 @@
ts = Series(np.random.randn(N), index=rng)
"""
-large_dict_setup = """from pandas_vb_common import *
+large_dict_setup = """from .pandas_vb_common import *
from pandas.compat import range
n = 10 ** 6
start_value = 10 ** 5
diff --git a/vb_suite/reshape.py b/vb_suite/reshape.py
index f6eaeb353acb5..daab96103f2c5 100644
--- a/vb_suite/reshape.py
+++ b/vb_suite/reshape.py
@@ -1,7 +1,7 @@
from vbench.api import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
index = MultiIndex.from_arrays([np.arange(100).repeat(100),
np.roll(np.tile(np.arange(100), 100), 25)])
df = DataFrame(np.random.randn(10000, 4), index=index)
diff --git a/vb_suite/series_methods.py b/vb_suite/series_methods.py
index d0c31cb04ca6a..cd8688495fa09 100644
--- a/vb_suite/series_methods.py
+++ b/vb_suite/series_methods.py
@@ -1,7 +1,7 @@
from vbench.api import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
setup = common_setup + """
diff --git a/vb_suite/sparse.py b/vb_suite/sparse.py
index 5da06451fe2d1..53e2778ee0865 100644
--- a/vb_suite/sparse.py
+++ b/vb_suite/sparse.py
@@ -1,7 +1,7 @@
from vbench.benchmark import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
diff --git a/vb_suite/stat_ops.py b/vb_suite/stat_ops.py
index 544ad6d00ed37..8d7c30dc9fdcf 100644
--- a/vb_suite/stat_ops.py
+++ b/vb_suite/stat_ops.py
@@ -1,7 +1,7 @@
from vbench.benchmark import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
#----------------------------------------------------------------------
diff --git a/vb_suite/strings.py b/vb_suite/strings.py
index f229e0ddedbae..0948df5673a0d 100644
--- a/vb_suite/strings.py
+++ b/vb_suite/strings.py
@@ -1,6 +1,6 @@
from vbench.api import Benchmark
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
"""
setup = common_setup + """
@@ -9,11 +9,11 @@
def make_series(letters, strlen, size):
return Series(
- np.fromiter(IT.cycle(letters), count=size*strlen, dtype='|S1')
- .view('|S{}'.format(strlen)))
+ [str(x) for x in np.fromiter(IT.cycle(letters), count=size*strlen, dtype='|S1')
+ .view('|S{}'.format(strlen))])
-many = make_series('matchthis'+string.uppercase, strlen=19, size=10000) # 31% matches
-few = make_series('matchthis'+string.uppercase*42, strlen=19, size=10000) # 1% matches
+many = make_series('matchthis'+string.ascii_uppercase, strlen=19, size=10000) # 31% matches
+few = make_series('matchthis'+string.ascii_uppercase*42, strlen=19, size=10000) # 1% matches
"""
strings_cat = Benchmark("many.str.cat(sep=',')", setup)
@@ -47,7 +47,7 @@ def make_series(letters, strlen, size):
strings_get = Benchmark("many.str.get(0)", setup)
setup = setup + """
-s = make_series(string.uppercase, strlen=10, size=10000).str.join('|')
+s = make_series(string.ascii_uppercase, strlen=10, size=10000).str.join('|')
"""
strings_get_dummies = Benchmark("s.str.get_dummies('|')", setup)
diff --git a/vb_suite/suite.py b/vb_suite/suite.py
index ca7a4a9b70836..70a6278c0852d 100644
--- a/vb_suite/suite.py
+++ b/vb_suite/suite.py
@@ -136,7 +136,7 @@ def generate_rst_files(benchmarks):
These historical benchmark graphs were produced with `vbench
<http://github.com/pydata/vbench>`__.
-The ``pandas_vb_common`` setup script can be found here_
+The ``.pandas_vb_common`` setup script can be found here_
.. _here: https://github.com/pydata/pandas/tree/master/vb_suite
diff --git a/vb_suite/timedelta.py b/vb_suite/timedelta.py
index febd70739b2c9..378968ea1379a 100644
--- a/vb_suite/timedelta.py
+++ b/vb_suite/timedelta.py
@@ -1,7 +1,7 @@
from vbench.api import Benchmark
from datetime import datetime
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
from pandas import to_timedelta
"""
diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py
index 6a99bd0dfdc65..7e10b333d5c56 100644
--- a/vb_suite/timeseries.py
+++ b/vb_suite/timeseries.py
@@ -11,7 +11,7 @@ def date_range(start=None, end=None, periods=None, freq=None):
return DatetimeIndex(start=start, end=end, periods=periods, offset=freq)
-common_setup = """from pandas_vb_common import *
+common_setup = """from .pandas_vb_common import *
from datetime import timedelta
N = 100000
@@ -312,7 +312,10 @@ def date_range(start=None, end=None, periods=None, freq=None):
setup = common_setup + """
import datetime as dt
import pandas as pd
-import pandas.tseries.holiday
+try:
+ import pandas.tseries.holiday
+except ImportError:
+ pass
import numpy as np
date = dt.datetime(2011,1,1)
@@ -417,9 +420,9 @@ def iter_n(iterable, n=None):
setup = common_setup + """
N = 100000
idx1 = date_range(start='20140101', freq='T', periods=N)
-delta_offset = Day()
-fast_offset = DateOffset(months=2, days=2)
-slow_offset = offsets.BusinessDay()
+delta_offset = pd.offsets.Day()
+fast_offset = pd.offsets.DateOffset(months=2, days=2)
+slow_offset = pd.offsets.BusinessDay()
"""
@@ -431,9 +434,9 @@ def iter_n(iterable, n=None):
setup = common_setup + """
N = 100000
s = Series(date_range(start='20140101', freq='T', periods=N))
-delta_offset = Day()
-fast_offset = DateOffset(months=2, days=2)
-slow_offset = offsets.BusinessDay()
+delta_offset = pd.offsets.Day()
+fast_offset = pd.offsets.DateOffset(months=2, days=2)
+slow_offset = pd.offsets.BusinessDay()
"""
| @jorisvandenbossche As you requested in #10849, here's the current state of affairs. The changes are:
- First patch:
- Fix the translation script so that functions defined in the vbench `setup` string get defined as classmethods and called appropriately. This fixes most failing tests.
- Some backwards-compatibility fixes around imports. Raising `NotImplementedError` inside `setup()` is equivalent to `SkipTest`, so I've tried to do that in a couple necessary cases (such as things using `test_parallel`)
- Speaking of `test_parallel`, this required a bit of hackery to support not-existing. I implemented a no-op decorator by that name so that the actual definition of test cases wouldn't fail. We simply raise `NotImplementedError` rather than test without it, however.
- Second patch:
- Support Python 3, the changes are essentially what I mentioned elsewhere.
- `from pandas_vb_common import *` -> `from .pandas_vb_common import *`
- Replace `xrange` with `range` and add `from pandas.compat import range` in `pandas_vb_common`
- Replace `string.uppercase` with `string.ascii_uppercase`
- Third patch
- `asv` instructions in `contributing.rst`, largely lifted from the vbench description. The current examples are minimalist, but most comparable to what vbench outputs.
- Fourth patch:
- Re-run the translation script. I think some other additions may be included in the first patch due to having run it a few times while making changes.
cc @jreback @TomAugspurger
| https://api.github.com/repos/pandas-dev/pandas/pulls/10928 | 2015-08-29T08:40:42Z | 2015-08-29T22:42:36Z | 2015-08-29T22:42:36Z | 2015-09-01T19:25:40Z |
PERF: GH10213 kth_smallest GIL release | diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index 556dd2c364cdf..4b82781fc39d9 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -298,4 +298,25 @@ def take_1d_pg2_int64(self):
@test_parallel(num_threads=2)
def take_1d_pg2_float64(self):
- com.take_1d(self.df.float64.values, self.indexer)
\ No newline at end of file
+ com.take_1d(self.df.float64.values, self.indexer)
+
+
+class nogil_kth_smallest(object):
+ number = 1
+ repeat = 5
+
+ def setup(self):
+ if (not have_real_test_parallel):
+ raise NotImplementedError
+ np.random.seed(1234)
+ self.N = 10000000
+ self.k = 500000
+ self.a = np.random.randn(self.N)
+ self.b = self.a.copy()
+ self.kwargs_list = [{'arr': self.a}, {'arr': self.b}]
+
+ def time_nogil_kth_smallest(self):
+ @test_parallel(num_threads=2, kwargs_list=self.kwargs_list)
+ def run(arr):
+ algos.kth_smallest(arr, self.k)
+ run()
diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
index a1326d63a112a..3370131929c22 100644
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -7,6 +7,7 @@
import pandas.util.testing as tm
import random
import numpy as np
+import threading
try:
from pandas.compat import range
except ImportError:
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index e9d39e0441055..b9909c14b592f 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -69,14 +69,15 @@ Releasing the GIL
We are releasing the global-interpreter-lock (GIL) on some cython operations.
This will allow other threads to run simultaneously during computation, potentially allowing performance improvements
-from multi-threading. Notably ``groupby`` and some indexing operations are a benefit from this. (:issue:`8882`)
+from multi-threading. Notably ``groupby``, ``nsmallest`` and some indexing operations benefit from this. (:issue:`8882`)
For example the groupby expression in the following code will have the GIL released during the factorization step, e.g. ``df.groupby('key')``
as well as the ``.sum()`` operation.
.. code-block:: python
- N = 1e6
+ N = 1000000
+ ngroups = 10
df = DataFrame({'key' : np.random.randint(0,ngroups,size=N),
'data' : np.random.randn(N) })
df.groupby('key')['data'].sum()
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 9b6bdf57d4509..44b1996272356 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -740,7 +740,7 @@ ctypedef fused numeric:
float64_t
-cdef inline Py_ssize_t swap(numeric *a, numeric *b) except -1:
+cdef inline Py_ssize_t swap(numeric *a, numeric *b) nogil except -1:
cdef numeric t
# cython doesn't allow pointer dereference so use array syntax
@@ -756,27 +756,27 @@ cpdef numeric kth_smallest(numeric[:] a, Py_ssize_t k):
cdef:
Py_ssize_t i, j, l, m, n = a.size
numeric x
-
- l = 0
- m = n - 1
-
- while l < m:
- x = a[k]
- i = l
- j = m
-
- while 1:
- while a[i] < x: i += 1
- while x < a[j]: j -= 1
- if i <= j:
- swap(&a[i], &a[j])
- i += 1; j -= 1
-
- if i > j: break
-
- if j < k: l = i
- if k < i: m = j
- return a[k]
+ with nogil:
+ l = 0
+ m = n - 1
+
+ while l < m:
+ x = a[k]
+ i = l
+ j = m
+
+ while 1:
+ while a[i] < x: i += 1
+ while x < a[j]: j -= 1
+ if i <= j:
+ swap(&a[i], &a[j])
+ i += 1; j -= 1
+
+ if i > j: break
+
+ if j < k: l = i
+ if k < i: m = j
+ return a[k]
cdef inline kth_smallest_c(float64_t* a, Py_ssize_t k, Py_ssize_t n):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index e3633a1ec4360..aaa83da036c2f 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -2044,14 +2044,16 @@ def use_numexpr(use, min_elements=expr._MIN_ELEMENTS):
if inspect.isfunction(obj) and name.startswith('assert'):
setattr(TestCase, name, staticmethod(obj))
-def test_parallel(num_threads=2):
+
+def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
-
+ kwargs_list : list of dicts, optional
+ The list of kwargs to update original function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
@@ -2061,14 +2063,23 @@ def test_parallel(num_threads=2):
"""
assert num_threads > 0
+ has_kwargs_list = kwargs_list is not None
+ if has_kwargs_list:
+ assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
+ if has_kwargs_list:
+ update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
+ else:
+ update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
- thread = threading.Thread(target=func, args=args, kwargs=kwargs)
+ updated_kwargs = update_kwargs(i)
+ thread = threading.Thread(target=func, args=args,
+ kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
| One tiny part of #10213
```
import timeit
setup_seq = '''
import pandas
import numpy
numpy.random.seed(1234)
x = numpy.random.randn(10000000)
a = x.copy()
b = x.copy()
def f(s):
pandas.algos.kth_smallest(s, 500000)
def seq():
f(a)
f(b)
'''
setup_parallel = '''
import pandas
import numpy
import threading
numpy.random.seed(1234)
x = numpy.random.randn(10000000)
a = x.copy()
b = x.copy()
def f(s):
pandas.algos.kth_smallest(s, 500000)
def parallel():
thread1 = threading.Thread(target=f, args=(a,))
thread2 = threading.Thread(target=f, args=(b,))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
'''
print(min(timeit.repeat(stmt='seq()', setup=setup_seq, repeat=100, number=1)))
print(min(timeit.repeat(stmt='parallel()', setup=setup_parallel, repeat=100, number=1)))
```
On master
```
0.15268295999976544
0.15424678300041705
```
On branch
```
0.1544521670002723
0.08813380599985976
```
Testing of `nsmallest`/`nlargest`/`median` (I don't think `median` calls `kth_smallest` though)
```
import pandas, numpy
from pandas.util.testing import test_parallel
n = 1000000
k = 50000
numpy.random.seed(1234)
s = pandas.Series(numpy.random.randn(n))
def f():
s.nlargest(k)
def seq():
f()
f()
@test_parallel(num_threads=2)
def g():
f()
```
Master `nsmallest`
```
In [2]: %timeit f()
10 loops, best of 3: 42.4 ms per loop
In [3]: %timeit g()
10 loops, best of 3: 79.9 ms per loop
In [4]: %timeit seq()
10 loops, best of 3: 84.9 ms per loop
```
Branch `nsmallest`
```
In [10]: %timeit f()
10 loops, best of 3: 42.8 ms per loop
In [11]: %timeit g()
10 loops, best of 3: 68.6 ms per loop
In [12]: %timeit seq()
10 loops, best of 3: 91.2 ms per loop
```
Master `nlargest`
```
In [2]: %timeit f()
10 loops, best of 3: 47.5 ms per loop
In [3]: %timeit g()
10 loops, best of 3: 86 ms per loop
In [4]: %timeit seq()
```
Branch `nlargest`
```
In [10]: %timeit f()
10 loops, best of 3: 48.1 ms per loop
In [11]: %timeit g()
10 loops, best of 3: 71 ms per loop
In [12]: %timeit seq()
10 loops, best of 3: 95.7 ms per loop
```
Master `median`
```
In [2]: %timeit f()
100 loops, best of 3: 15.4 ms per loop
In [3]: %timeit g()
10 loops, best of 3: 20.7 ms per loop
In [4]: %timeit seq()
10 loops, best of 3: 30.7 ms per loop
```
Branch `median`
```
In [12]: %timeit f()
100 loops, best of 3: 15 ms per loop
In [13]: %timeit g()
10 loops, best of 3: 21.5 ms per loop
In [14]: %timeit seq()
10 loops, best of 3: 30 ms per loop
```
Results are pretty in line with expectations -- `nsmallest` does quite a bit more than `kth_smallest`, such as copying and indexing.
| https://api.github.com/repos/pandas-dev/pandas/pulls/10927 | 2015-08-29T06:37:53Z | 2015-09-02T11:49:29Z | 2015-09-02T11:49:29Z | 2015-09-03T19:14:47Z |
BUG: Bug in incorrection computation of .mean() on timedelta64[ns] because of overflow #9442 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 1607d81543946..3e81a923a114c 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -725,6 +725,7 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+- Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`)
- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`)
- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`)
- Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 245535e47abd8..72ea6d14456b0 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -63,6 +63,7 @@ def __str__(self):
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
+_int64_max = np.iinfo(np.int64).max
# define abstract base classes to enable isinstance type checking on our
# objects
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index c70fb6339517d..447a273a1e171 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -21,7 +21,8 @@
is_bool_dtype, is_object_dtype,
is_datetime64_dtype, is_timedelta64_dtype,
is_datetime_or_timedelta_dtype, _get_dtype,
- is_int_or_datetime_dtype, is_any_int_dtype)
+ is_int_or_datetime_dtype, is_any_int_dtype,
+ _int64_max)
class disallow(object):
@@ -145,7 +146,7 @@ def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
else:
if fill_value_typ == '+inf':
# need the max int here
- return np.iinfo(np.int64).max
+ return _int64_max
else:
return tslib.iNaT
@@ -223,7 +224,12 @@ def _wrap_results(result, dtype):
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
- result = lib.Timedelta(result)
+
+ # raise if we have a timedelta64[ns] which is too large
+ if np.fabs(result) > _int64_max:
+ raise ValueError("overflow in timedelta operation")
+
+ result = lib.Timedelta(result, unit='ns')
else:
result = result.astype('i8').view(dtype)
@@ -247,6 +253,8 @@ def nansum(values, axis=None, skipna=True):
dtype_sum = dtype_max
if is_float_dtype(dtype):
dtype_sum = dtype
+ elif is_timedelta64_dtype(dtype):
+ dtype_sum = np.float64
the_sum = values.sum(axis, dtype=dtype_sum)
the_sum = _maybe_null_out(the_sum, axis, mask)
@@ -260,7 +268,7 @@ def nanmean(values, axis=None, skipna=True):
dtype_sum = dtype_max
dtype_count = np.float64
- if is_integer_dtype(dtype):
+ if is_integer_dtype(dtype) or is_timedelta64_dtype(dtype):
dtype_sum = np.float64
elif is_float_dtype(dtype):
dtype_sum = dtype
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py
index 753e76fd1faea..4870fbd55f33e 100644
--- a/pandas/tseries/tests/test_timedeltas.py
+++ b/pandas/tseries/tests/test_timedeltas.py
@@ -686,6 +686,25 @@ def test_timedelta_ops(self):
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'), Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
+ def test_overflow(self):
+ # GH 9442
+ s = Series(pd.date_range('20130101',periods=100000,freq='H'))
+ s[0] += pd.Timedelta('1s 1ms')
+
+ # mean
+ result = (s-s.min()).mean()
+ expected = pd.Timedelta((pd.DatetimeIndex((s-s.min())).asi8/len(s)).sum())
+
+ # the computation is converted to float so might be some loss of precision
+ self.assertTrue(np.allclose(result.value/1000, expected.value/1000))
+
+ # sum
+ self.assertRaises(ValueError, lambda : (s-s.min()).sum())
+ s1 = s[0:10000]
+ self.assertRaises(ValueError, lambda : (s1-s1.min()).sum())
+ s2 = s[0:1000]
+ result = (s2-s2.min()).sum()
+
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
| closes #9442
| https://api.github.com/repos/pandas-dev/pandas/pulls/10926 | 2015-08-29T02:03:26Z | 2015-08-29T13:12:39Z | 2015-08-29T13:12:39Z | 2015-08-29T13:12:39Z |
BUG: Bug in clearing the cache on DataFrame.pop and a subsequent inplace op #10912 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 71aac42b17810..0ccfa06fc8844 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -750,7 +750,7 @@ Bug Fixes
- Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`)
- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
- Bug in ``BinGrouper.group_info`` where returned values are not compatible with base class (:issue:`10914`)
-
+- Bug in clearing the cache on ``DataFrame.pop`` and a subsequent inplace op (:issue:`10912`)
- Bug causing ``DataFrame.where`` to not respect the ``axis`` parameter when the frame has a symmetric shape. (:issue:`9736`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b127fb220569d..fe09e03281b4f 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -501,6 +501,11 @@ def pop(self, item):
"""
result = self[item]
del self[item]
+ try:
+ result._reset_cacher()
+ except AttributeError:
+ pass
+
return result
def squeeze(self):
@@ -1094,6 +1099,11 @@ def _set_as_cached(self, item, cacher):
a weakref to cacher """
self._cacher = (item, weakref.ref(cacher))
+ def _reset_cacher(self):
+ """ reset the cacher """
+ if hasattr(self,'_cacher'):
+ del self._cacher
+
def _iget_item_cache(self, item):
""" return the cached item, item represents a positional indexer """
ax = self._info_axis
@@ -1330,6 +1340,7 @@ def __delitem__(self, key):
# exception:
self._data.delete(key)
+ # delete from the caches
try:
del self._item_cache[key]
except KeyError:
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index fe78ff0f79ff3..58c6d15f8ada5 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -5177,6 +5177,20 @@ def test_pop(self):
self.assertNotIn('foo', self.frame)
# TODO self.assertEqual(self.frame.columns.name, 'baz')
+ # 10912
+ # inplace ops cause caching issue
+ a = DataFrame([[1,2,3],[4,5,6]], columns=['A','B','C'], index=['X','Y'])
+ b = a.pop('B')
+ b += 1
+
+ # original frame
+ expected = DataFrame([[1,3],[4,6]], columns=['A','C'], index=['X','Y'])
+ assert_frame_equal(a, expected)
+
+ # result
+ expected = Series([2,5],index=['X','Y'],name='B')+1
+ assert_series_equal(b, expected)
+
def test_pop_non_unique_cols(self):
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
df.columns = ["a", "b", "a"]
| closes #10912
| https://api.github.com/repos/pandas-dev/pandas/pulls/10922 | 2015-08-28T18:36:05Z | 2015-08-28T23:28:06Z | 2015-08-28T23:28:06Z | 2015-08-28T23:28:06Z |
DEPR: Series.nlargest/nsmallest take_last. | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index c18bedd0cf6eb..8fb738ff7d76d 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -660,6 +660,7 @@ Deprecations
- ``Categorical.name`` was deprecated to make ``Categorical`` more ``numpy.ndarray`` like. Use ``Series(cat, name="whatever")`` instead (:issue:`10482`).
- ``drop_duplicates`` and ``duplicated``'s ``take_last`` keyword was deprecated in favor of ``keep``. (:issue:`6511`, :issue:`8505`)
+- ``Series.nsmallest`` and ``nlargest``'s ``take_last`` keyword was deprecated in favor of ``keep``. (:issue:`10792`)
- ``DataFrame.combineAdd`` and ``DataFrame.combineMult`` are deprecated. They
can easily be replaced by using the ``add`` and ``mul`` methods:
``DataFrame.add(other, fill_value=0)`` and ``DataFrame.mul(other, fill_value=1.)``
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 250b4b3e562b8..36d31d493b10d 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -453,24 +453,24 @@ def group_position(*args):
_dtype_map = {'datetime64[ns]': 'int64', 'timedelta64[ns]': 'int64'}
-def _finalize_nsmallest(arr, kth_val, n, take_last, narr):
+def _finalize_nsmallest(arr, kth_val, n, keep, narr):
ns, = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind='mergesort')][:n]
-
- if take_last:
+ if keep == 'last':
# reverse indices
return narr - 1 - inds
- return inds
+ else:
+ return inds
-def nsmallest(arr, n, take_last=False):
+def nsmallest(arr, n, keep='first'):
'''
Find the indices of the n smallest values of a numpy array.
Note: Fails silently with NaN.
'''
- if take_last:
+ if keep == 'last':
arr = arr[::-1]
narr = len(arr)
@@ -480,10 +480,10 @@ def nsmallest(arr, n, take_last=False):
arr = arr.view(_dtype_map.get(sdtype, sdtype))
kth_val = algos.kth_smallest(arr.copy(), n - 1)
- return _finalize_nsmallest(arr, kth_val, n, take_last, narr)
+ return _finalize_nsmallest(arr, kth_val, n, keep, narr)
-def nlargest(arr, n, take_last=False):
+def nlargest(arr, n, keep='first'):
"""
Find the indices of the n largest values of a numpy array.
@@ -491,11 +491,11 @@ def nlargest(arr, n, take_last=False):
"""
sdtype = str(arr.dtype)
arr = arr.view(_dtype_map.get(sdtype, sdtype))
- return nsmallest(-arr, n, take_last=take_last)
+ return nsmallest(-arr, n, keep=keep)
-def select_n_slow(dropped, n, take_last, method):
- reverse_it = take_last or method == 'nlargest'
+def select_n_slow(dropped, n, keep, method):
+ reverse_it = (keep == 'last' or method == 'nlargest')
ascending = method == 'nsmallest'
slc = np.s_[::-1] if reverse_it else np.s_[:]
return dropped[slc].sort_values(ascending=ascending).head(n)
@@ -504,13 +504,13 @@ def select_n_slow(dropped, n, take_last, method):
_select_methods = {'nsmallest': nsmallest, 'nlargest': nlargest}
-def select_n(series, n, take_last, method):
+def select_n(series, n, keep, method):
"""Implement n largest/smallest.
Parameters
----------
n : int
- take_last : bool
+ keep : {'first', 'last'}, default 'first'
method : str, {'nlargest', 'nsmallest'}
Returns
@@ -522,15 +522,18 @@ def select_n(series, n, take_last, method):
np.timedelta64)):
raise TypeError("Cannot use method %r with dtype %s" % (method, dtype))
+ if keep not in ('first', 'last'):
+ raise ValueError('keep must be either "first", "last"')
+
if n <= 0:
return series[[]]
dropped = series.dropna()
if n >= len(series):
- return select_n_slow(dropped, n, take_last, method)
+ return select_n_slow(dropped, n, keep, method)
- inds = _select_methods[method](dropped.values, n, take_last)
+ inds = _select_methods[method](dropped.values, n, keep)
return dropped.iloc[inds]
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3e908bf9d579b..3abf7c4458854 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3163,16 +3163,16 @@ def sortlevel(self, level=0, axis=0, ascending=True,
inplace=inplace, sort_remaining=sort_remaining)
- def _nsorted(self, columns, n, method, take_last):
+ def _nsorted(self, columns, n, method, keep):
if not com.is_list_like(columns):
columns = [columns]
columns = list(columns)
- ser = getattr(self[columns[0]], method)(n, take_last=take_last)
+ ser = getattr(self[columns[0]], method)(n, keep=keep)
ascending = dict(nlargest=False, nsmallest=True)[method]
return self.loc[ser.index].sort_values(columns, ascending=ascending,
kind='mergesort')
- def nlargest(self, n, columns, take_last=False):
+ def nlargest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` largest
values of `columns`.
@@ -3184,8 +3184,10 @@ def nlargest(self, n, columns, take_last=False):
Number of items to retrieve
columns : list or str
Column name or names to order by
- take_last : bool, optional
- Where there are duplicate values, take the last duplicate
+ keep : {'first', 'last', False}, default 'first'
+ Where there are duplicate values:
+ - ``first`` : take the first occurrence.
+ - ``last`` : take the last occurrence.
Returns
-------
@@ -3202,9 +3204,9 @@ def nlargest(self, n, columns, take_last=False):
1 10 b 2
2 8 d NaN
"""
- return self._nsorted(columns, n, 'nlargest', take_last)
+ return self._nsorted(columns, n, 'nlargest', keep)
- def nsmallest(self, n, columns, take_last=False):
+ def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
@@ -3216,8 +3218,10 @@ def nsmallest(self, n, columns, take_last=False):
Number of items to retrieve
columns : list or str
Column name or names to order by
- take_last : bool, optional
- Where there are duplicate values, take the last duplicate
+ keep : {'first', 'last', False}, default 'first'
+ Where there are duplicate values:
+ - ``first`` : take the first occurrence.
+ - ``last`` : take the last occurrence.
Returns
-------
@@ -3234,7 +3238,7 @@ def nsmallest(self, n, columns, take_last=False):
0 1 a 1
2 8 d NaN
"""
- return self._nsorted(columns, n, 'nsmallest', take_last)
+ return self._nsorted(columns, n, 'nsmallest', keep)
def swaplevel(self, i, j, axis=0):
"""
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 444f149e70e34..8adaf1437c1de 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -19,7 +19,8 @@
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
-from pandas.util.decorators import cache_readonly, Appender, make_signature
+from pandas.util.decorators import (cache_readonly, Appender, make_signature,
+ deprecate_kwarg)
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
@@ -82,7 +83,7 @@
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
- frozenset(['dtype', 'unique', 'nlargest', 'nsmallest'])
+ frozenset(['dtype', 'unique'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
@@ -2583,6 +2584,19 @@ def nunique(self, dropna=True):
index=self.grouper.result_index,
name=self.name)
+ @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
+ @Appender(Series.nlargest.__doc__)
+ def nlargest(self, n=5, keep='first'):
+ # ToDo: When we remove deprecate_kwargs, we can remote these methods
+ # and inlucde nlargest and nsmallest to _series_apply_whitelist
+ return self.apply(lambda x: x.nlargest(n=n, keep=keep))
+
+
+ @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
+ @Appender(Series.nsmallest.__doc__)
+ def nsmallest(self, n=5, keep='first'):
+ return self.apply(lambda x: x.nsmallest(n=n, keep=keep))
+
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b4fc1c9c48f27..2890730956c75 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1817,15 +1817,19 @@ def rank(self, method='average', na_option='keep', ascending=True,
ascending=ascending, pct=pct)
return self._constructor(ranks, index=self.index).__finalize__(self)
- def nlargest(self, n=5, take_last=False):
+ @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
+ def nlargest(self, n=5, keep='first'):
"""Return the largest `n` elements.
Parameters
----------
n : int
Return this many descending sorted values
- take_last : bool
- Where there are duplicate values, take the last duplicate
+ keep : {'first', 'last', False}, default 'first'
+ Where there are duplicate values:
+ - ``first`` : take the first occurrence.
+ - ``last`` : take the last occurrence.
+ take_last : deprecated
Returns
-------
@@ -1848,17 +1852,21 @@ def nlargest(self, n=5, take_last=False):
>>> s = pd.Series(np.random.randn(1e6))
>>> s.nlargest(10) # only sorts up to the N requested
"""
- return select_n(self, n=n, take_last=take_last, method='nlargest')
+ return select_n(self, n=n, keep=keep, method='nlargest')
- def nsmallest(self, n=5, take_last=False):
+ @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
+ def nsmallest(self, n=5, keep='first'):
"""Return the smallest `n` elements.
Parameters
----------
n : int
Return this many ascending sorted values
- take_last : bool
- Where there are duplicate values, take the last duplicate
+ keep : {'first', 'last', False}, default 'first'
+ Where there are duplicate values:
+ - ``first`` : take the first occurrence.
+ - ``last`` : take the last occurrence.
+ take_last : deprecated
Returns
-------
@@ -1881,7 +1889,7 @@ def nsmallest(self, n=5, take_last=False):
>>> s = pd.Series(np.random.randn(1e6))
>>> s.nsmallest(10) # only sorts up to the N requested
"""
- return select_n(self, n=n, take_last=take_last, method='nsmallest')
+ return select_n(self, n=n, keep=keep, method='nsmallest')
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index a8bbc372ebe25..41703b3b5a3b7 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -4997,7 +4997,7 @@ def test_groupby_whitelist(self):
'corr', 'cov',
'diff',
'unique',
- 'nlargest', 'nsmallest',
+ # 'nlargest', 'nsmallest',
])
for obj, whitelist in zip((df, s),
@@ -5316,6 +5316,16 @@ def test_nlargest(self):
[3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
+
+ a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
+ gb = a.groupby(b)
+ e = Series([3, 2, 1, 3, 3, 2],
+ index=MultiIndex.from_arrays([list('aaabbb'),
+ [2, 3, 1, 6, 5, 7]]))
+ assert_series_equal(gb.nlargest(3, keep='last'), e)
+ with tm.assert_produces_warning(FutureWarning):
+ assert_series_equal(gb.nlargest(3, take_last=True), e)
+
def test_nsmallest(self):
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
@@ -5326,6 +5336,15 @@ def test_nsmallest(self):
[0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
+ a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
+ gb = a.groupby(b)
+ e = Series([0, 1, 1, 0, 1, 2],
+ index=MultiIndex.from_arrays([list('aaabbb'),
+ [4, 1, 0, 9, 8, 7]]))
+ assert_series_equal(gb.nsmallest(3, keep='last'), e)
+ with tm.assert_produces_warning(FutureWarning):
+ assert_series_equal(gb.nsmallest(3, take_last=True), e)
+
def test_transform_doesnt_clobber_ints(self):
# GH 7972
n = 6
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index a429059c761d6..34ea674fe10c0 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5040,11 +5040,16 @@ def test_nsmallest_nlargest(self):
for s in s_list:
assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
- assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]])
+
+ assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])
+ with tm.assert_produces_warning(FutureWarning):
+ assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]])
assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]])
- assert_series_equal(s.nlargest(3, take_last=True),
- s.iloc[[4, 0, 3]])
+
+ assert_series_equal(s.nlargest(3, keep='last'), s.iloc[[4, 0, 3]])
+ with tm.assert_produces_warning(FutureWarning):
+ assert_series_equal(s.nlargest(3, take_last=True), s.iloc[[4, 0, 3]])
empty = s.iloc[0:0]
assert_series_equal(s.nsmallest(0), empty)
@@ -5062,6 +5067,12 @@ def test_nsmallest_nlargest(self):
assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
+ msg = 'keep must be either "first", "last"'
+ with tm.assertRaisesRegexp(ValueError, msg):
+ s.nsmallest(keep='invalid')
+ with tm.assertRaisesRegexp(ValueError, msg):
+ s.nlargest(keep='invalid')
+
def test_rank(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
| Closes #10792.
Because using `deprecate_kwargs` hides original impl, used ugly workaround for `GroupBy`...
| https://api.github.com/repos/pandas-dev/pandas/pulls/10920 | 2015-08-28T13:34:45Z | 2015-08-29T00:03:43Z | 2015-08-29T00:03:43Z | 2015-08-29T00:10:30Z |
BUG: closes bug in BinGrouper.group_info where returned values are not compatible with base class | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index c18bedd0cf6eb..d3d7fe1637900 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -747,6 +747,7 @@ Bug Fixes
- Bug in ``Index`` construction with a mixed list of tuples (:issue:`10697`)
- Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`)
- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`)
+- Bug in ``BinGrouper.group_info`` where returned values are not compatible with base class (:issue:`10914`)
- Bug causing ``DataFrame.where`` to not respect the ``axis`` parameter when the frame has a symmetric shape. (:issue:`9736`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 444f149e70e34..fae54fa298e85 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -1790,8 +1790,10 @@ def indices(self):
@cache_readonly
def group_info(self):
- # for compat
- return self.bins, self.binlabels, self.ngroups
+ ngroups = self.ngroups
+ obs_group_ids = np.arange(ngroups)
+ comp_ids = np.repeat(np.arange(ngroups), np.diff(np.r_[0, self.bins]))
+ return comp_ids, obs_group_ids, ngroups
@cache_readonly
def ngroups(self):
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 7dafc88bf9239..0bee6f514cad0 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -916,6 +916,31 @@ def test_resample_timegrouper(self):
result = df.groupby(pd.Grouper(freq='M', key='A')).count()
assert_frame_equal(result, expected)
+ def test_resample_group_info(self): # GH10914
+ for n, k in product((10000, 100000), (10, 100, 1000)):
+ dr = date_range(start='2015-08-27', periods=n // 10, freq='T')
+ ts = Series(np.random.randint(0, n // k, n),
+ index=np.random.choice(dr, n))
+
+ left = ts.resample('30T', how='nunique')
+ ix = date_range(start=ts.index.min(),
+ end=ts.index.max(),
+ freq='30T')
+
+ vals = ts.values
+ bins = np.searchsorted(ix.values, ts.index, side='right')
+
+ sorter = np.lexsort((vals, bins))
+ vals, bins = vals[sorter], bins[sorter]
+
+ mask = np.r_[True, vals[1:] != vals[:-1]]
+ mask |= np.r_[True, bins[1:] != bins[:-1]]
+
+ arr = np.bincount(bins[mask] - 1, minlength=len(ix))
+ right = Series(arr, index=ix)
+
+ assert_series_equal(left, right)
+
def test_resmaple_dst_anchor(self):
# 5172
dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
| closes https://github.com/pydata/pandas/issues/10914
| https://api.github.com/repos/pandas-dev/pandas/pulls/10918 | 2015-08-28T03:24:08Z | 2015-08-28T12:22:23Z | 2015-08-28T12:22:23Z | 2015-08-29T12:39:08Z |
PERF: perf improvements in drop_duplicates for integer dtyped arrays | diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py
index d6fbd0d31c389..03e654b4886cc 100644
--- a/asv_bench/benchmarks/reindex.py
+++ b/asv_bench/benchmarks/reindex.py
@@ -61,6 +61,19 @@ def time_frame_drop_duplicates(self):
self.df.drop_duplicates(['key1', 'key2'])
+class frame_drop_duplicates_int(object):
+
+ def setup(self):
+ np.random.seed(1234)
+ self.N = 1000000
+ self.K = 10000
+ self.key1 = np.random.randint(0,self.K,size=self.N)
+ self.df = DataFrame({'key1': self.key1})
+
+ def time_frame_drop_duplicates_int(self):
+ self.df.drop_duplicates()
+
+
class frame_drop_duplicates_na(object):
goal_time = 0.2
@@ -381,4 +394,4 @@ def setup(self):
self.s2 = Series(np.tile(tm.makeStringIndex(1000).values, 10))
def time_series_drop_duplicates_string(self):
- self.s2.drop_duplicates()
\ No newline at end of file
+ self.s2.drop_duplicates()
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index d3d7fe1637900..33abc62b3f973 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -722,7 +722,7 @@ Performance Improvements
- Added vbench benchmarks for alternative ExcelWriter engines and reading Excel files (:issue:`7171`)
- Performance improvements in ``Categorical.value_counts`` (:issue:`10804`)
- Performance improvements in ``SeriesGroupBy.nunique`` and ``SeriesGroupBy.value_counts`` (:issue:`10820`)
-
+- Performance improvements in ``DataFrame.drop_duplicates`` with integer dtypes (:issue:`10917`)
- 4x improvement in ``timedelta`` string parsing (:issue:`6755`, :issue:`10426`)
- 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`)
- Significantly improved performance of indexing ``MultiIndex`` with slicers (:issue:`10287`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3e908bf9d579b..af2959e86274f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2962,7 +2962,13 @@ def duplicated(self, subset=None, keep='first'):
from pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
- labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
+
+ # if we have integers we can directly index with these
+ if com.is_integer_dtype(vals):
+ from pandas.core.nanops import unique1d
+ labels, shape = vals, unique1d(vals)
+ else:
+ labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8',copy=False), len(shape)
if subset is None:
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 16143fa612c48..693b761ae7b4b 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -7975,6 +7975,22 @@ def test_drop_duplicates(self):
expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)
assert_frame_equal(result, expected)
+ # integers
+ result = df.drop_duplicates('C')
+ expected = df.iloc[[0,2]]
+ assert_frame_equal(result, expected)
+ result = df.drop_duplicates('C',keep='last')
+ expected = df.iloc[[-2,-1]]
+ assert_frame_equal(result, expected)
+
+ df['E'] = df['C'].astype('int8')
+ result = df.drop_duplicates('E')
+ expected = df.iloc[[0,2]]
+ assert_frame_equal(result, expected)
+ result = df.drop_duplicates('E',keep='last')
+ expected = df.iloc[[-2,-1]]
+ assert_frame_equal(result, expected)
+
def test_drop_duplicates_for_take_all(self):
df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
'foo', 'bar', 'qux', 'foo'],
| no need to factorize integers when dropping duplicates.
master
```
In [1]: np.random.seed(1234)
In [2]: df = DataFrame({'A' : np.random.randint(0,10000,size=1000000)})
In [3]: %timeit df.drop_duplicates()
10 loops, best of 3: 36.9 ms per loop
```
PR
```
In [2]: df = DataFrame({'A' : np.random.randint(0,10000,size=1000000)})
In [3]: %timeit df.drop_duplicates()
10 loops, best of 3: 21.6 ms per loop
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10917 | 2015-08-28T03:14:24Z | 2015-08-28T18:18:58Z | 2015-08-28T18:18:58Z | 2015-08-28T18:18:58Z |
Revert "Merge pull request #10727 from jorisvandenbossche/sphinx-traceback | diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index 1888345e1055c..850f59c2713eb 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -661,14 +661,18 @@ values NOT in the categories, similarly to how you can reindex ANY pandas index.
Reshaping and Comparision operations on a ``CategoricalIndex`` must have the same categories
or a ``TypeError`` will be raised.
- .. ipython:: python
- :okexcept:
+ .. code-block:: python
+
+ In [9]: df3 = pd.DataFrame({'A' : np.arange(6),
+ 'B' : pd.Series(list('aabbca')).astype('category')})
+
+ In [11]: df3 = df3.set_index('B')
+
+ In [11]: df3.index
+ Out[11]: CategoricalIndex([u'a', u'a', u'b', u'b', u'c', u'a'], categories=[u'a', u'b', u'c'], ordered=False, name=u'B', dtype='category')
- df3 = pd.DataFrame({'A' : np.arange(6),
- 'B' : pd.Series(list('aabbca')).astype('category')})
- df3 = df3.set_index('B')
- df3.index
- pd.concat([df2, df3]
+ In [12]: pd.concat([df2, df3]
+ TypeError: categories must match existing categories when appending
.. _indexing.float64index:
@@ -734,18 +738,20 @@ In float indexes, slicing using floats is allowed
In non-float indexes, slicing using floats will raise a ``TypeError``
-.. ipython:: python
- :okexcept:
+.. code-block:: python
+
+ In [1]: pd.Series(range(5))[3.5]
+ TypeError: the label [3.5] is not a proper indexer for this index type (Int64Index)
- pd.Series(range(5))[3.5]
- pd.Series(range(5))[3.5:4.5]
+ In [1]: pd.Series(range(5))[3.5:4.5]
+ TypeError: the slice start [3.5] is not a proper indexer for this index type (Int64Index)
Using a scalar float indexer will be deprecated in a future version, but is allowed for now.
-.. ipython:: python
- :okwarning:
+.. code-block:: python
- pd.Series(range(5))[3.0]
+ In [3]: pd.Series(range(5))[3.0]
+ Out[3]: 3
Here is a typical use-case for using this type of indexing. Imagine that you have a somewhat
irregular timedelta-like indexing scheme, but the data is recorded as floats. This could for
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 6bfbfb87f2c55..956c90ae63034 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -352,11 +352,13 @@ objects of the same length:
Trying to compare ``Index`` or ``Series`` objects of different lengths will
raise a ValueError:
-.. ipython:: python
- :okexcept:
+.. code-block:: python
+
+ In [55]: pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo', 'bar'])
+ ValueError: Series lengths must match to compare
- pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo', 'bar'])
- pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo'])
+ In [56]: pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo'])
+ ValueError: Series lengths must match to compare
Note that this is different from the numpy behavior where a comparison can
be broadcast:
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index 847044c4745f9..5a62e7dccea34 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -143,10 +143,10 @@ label:
If a label is not contained, an exception is raised:
-.. ipython:: python
- :okexcept:
+.. code-block:: python
- s['f']
+ >>> s['f']
+ KeyError: 'f'
Using the ``get`` method, a missing label will return None or specified default:
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index a49a4745f7200..38629ee7baaea 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -293,10 +293,10 @@ Selection By Label
dfl = pd.DataFrame(np.random.randn(5,4), columns=list('ABCD'), index=pd.date_range('20130101',periods=5))
dfl
- .. ipython:: python
- :okexcept:
+ .. code-block:: python
- dfl.loc[2:3]
+ In [4]: dfl.loc[2:3]
+ TypeError: cannot do slice indexing on <class 'pandas.tseries.index.DatetimeIndex'> with these indexers [2] of <type 'int'>
String likes in slicing *can* be convertible to the type of the index and lead to natural slicing.
@@ -475,11 +475,13 @@ A single indexer that is out of bounds will raise an ``IndexError``.
A list of indexers where any element is out of bounds will raise an
``IndexError``
-.. ipython:: python
- :okexcept:
+.. code-block:: python
dfl.iloc[[4,5,6]]
+ IndexError: positional indexers are out-of-bounds
+
dfl.iloc[:,4]
+ IndexError: single positional indexer is out-of-bounds
.. _indexing.basics.partial_setting:
diff --git a/doc/source/options.rst b/doc/source/options.rst
index 834b4b642c393..26871a11473de 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -57,7 +57,11 @@ The following will **not work** because it matches multiple option names, e.g.
.. ipython:: python
:okexcept:
- pd.get_option("column")
+ try:
+ pd.get_option("column")
+ except KeyError as e:
+ print(e)
+
**Note:** Using this form of shorthand may cause your code to break if new options with similar names are added in future versions.
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index a2067b9a37d55..6f30ff3f51ad5 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -205,9 +205,9 @@ Invalid Data
Pass ``errors='coerce'`` to convert invalid data to ``NaT`` (not a time):
.. ipython:: python
+ :okexcept:
# this is the default, raise when unparseable
- @okexcept
to_datetime(['2009/07/31', 'asd'], errors='raise')
# return the original input when unparseable
@@ -656,7 +656,7 @@ apply the offset to each element.
rng + DateOffset(months=2)
s + DateOffset(months=2)
s - DateOffset(months=2)
-
+
If the offset class maps directly to a ``Timedelta`` (``Day``, ``Hour``,
``Minute``, ``Second``, ``Micro``, ``Milli``, ``Nano``) it can be
used exactly like a ``Timedelta`` - see the
@@ -670,7 +670,7 @@ used exactly like a ``Timedelta`` - see the
td + Minute(15)
Note that some offsets (such as ``BQuarterEnd``) do not have a
-vectorized implementation. They can still be used but may
+vectorized implementation. They can still be used but may
calculate signficantly slower and will raise a ``PerformanceWarning``
.. ipython:: python
@@ -1702,13 +1702,13 @@ the top example will fail as it contains ambiguous times and the bottom will
infer the right offset.
.. ipython:: python
+ :okexcept:
rng_hourly = DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00',
'11/06/2011 01:00', '11/06/2011 02:00',
'11/06/2011 03:00'])
# This will fail as there are ambiguous times
- @okexcept
rng_hourly.tz_localize('US/Eastern')
rng_hourly_eastern = rng_hourly.tz_localize('US/Eastern', ambiguous='infer')
rng_hourly_eastern.tolist()
diff --git a/doc/sphinxext/ipython_sphinxext/ipython_directive.py b/doc/sphinxext/ipython_sphinxext/ipython_directive.py
index 04a9e804f9af2..ad7ada8e4eea3 100644
--- a/doc/sphinxext/ipython_sphinxext/ipython_directive.py
+++ b/doc/sphinxext/ipython_sphinxext/ipython_directive.py
@@ -465,6 +465,10 @@ def process_input(self, data, input_prompt, lineno):
self.cout.seek(0)
output = self.cout.read()
+ if not is_suppress and not is_semicolon:
+ ret.append(output)
+ elif is_semicolon: # get spacing right
+ ret.append('')
# context information
filename = self.state.document.current_source
@@ -494,16 +498,6 @@ def process_input(self, data, input_prompt, lineno):
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
- # if :okexcept: has been specified, display shorter traceback
- if is_okexcept and "Traceback" in output:
- traceback = output.split('\n\n')
- output = traceback[-1]
-
- if not is_suppress and not is_semicolon:
- ret.append(output)
- elif is_semicolon: # get spacing right
- ret.append('')
-
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
| This reverts commit d40627398cf78347f5a49fa060bad1c40514908a, reversing
changes made to bd804aa75ac9b7a55c00101dbea01571cbdfd068.
@jorisvandenbossche reverting so that the docs build
pls rebsumit when you can
| https://api.github.com/repos/pandas-dev/pandas/pulls/10916 | 2015-08-28T03:06:21Z | 2015-08-28T03:08:45Z | 2015-08-28T03:08:45Z | 2015-08-29T00:17:38Z |
explain how to skip rows between header & data | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 9e7b9ad0b7582..9760f18dd837a 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -971,6 +971,57 @@ Parsing date components in multi-columns is faster with a format
In [36]: %timeit pd.to_datetime(ds)
1 loops, best of 3: 488 ms per loop
+Skip row between header and data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. ipython:: python
+
+ from io import StringIO
+ import pandas as pd
+
+ data = """;;;;
+ ;;;;
+ ;;;;
+ ;;;;
+ ;;;;
+ ;;;;
+ ;;;;
+ ;;;;
+ ;;;;
+ ;;;;
+ date;Param1;Param2;Param4;Param5
+ ;m²;°C;m²;m
+ ;;;;
+ 01.01.1990 00:00;1;1;2;3
+ 01.01.1990 01:00;5;3;4;5
+ 01.01.1990 02:00;9;5;6;7
+ 01.01.1990 03:00;13;7;8;9
+ 01.01.1990 04:00;17;9;10;11
+ 01.01.1990 05:00;21;11;12;13
+ """
+
+Option 1: pass rows explicitly to skiprows
+""""""""""""""""""""""""""""""""""""""""""
+
+.. ipython:: python
+
+ pd.read_csv(StringIO(data.decode('UTF-8')), sep=';', skiprows=[11,12],
+ index_col=0, parse_dates=True, header=10)
+
+Option 2: read column names and then data
+"""""""""""""""""""""""""""""""""""""""""
+
+.. ipython:: python
+
+ pd.read_csv(StringIO(data.decode('UTF-8')), sep=';',
+ header=10, parse_dates=True, nrows=10).columns
+ columns = pd.read_csv(StringIO(data.decode('UTF-8')), sep=';',
+ header=10, parse_dates=True, nrows=10).columns
+ pd.read_csv(StringIO(data.decode('UTF-8')), sep=';',
+ header=12, parse_dates=True, names=columns)
+
+
+
.. _cookbook.sql:
SQL
| This is the result from
https://github.com/pydata/pandas/issues/10898
| https://api.github.com/repos/pandas-dev/pandas/pulls/10910 | 2015-08-27T09:50:17Z | 2015-11-10T15:29:21Z | 2015-11-10T15:29:21Z | 2015-11-10T15:29:29Z |
PERF: imporves performance in SeriesGroupBy.value_counts | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 61500bedcdcd4..9049d8de550d0 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -721,7 +721,7 @@ Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- Added vbench benchmarks for alternative ExcelWriter engines and reading Excel files (:issue:`7171`)
- Performance improvements in ``Categorical.value_counts`` (:issue:`10804`)
-- Performance improvements in ``SeriesGroupBy.nunique`` (:issue:`10820`)
+- Performance improvements in ``SeriesGroupBy.nunique`` and ``SeriesGroupBy.value_counts`` (:issue:`10820`)
- 4x improvement in ``timedelta`` string parsing (:issue:`6755`, :issue:`10426`)
- 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 220e67c43e4be..444f149e70e34 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -82,7 +82,7 @@
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
- frozenset(['dtype', 'value_counts', 'unique', 'nlargest', 'nsmallest'])
+ frozenset(['dtype', 'unique', 'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
@@ -2583,6 +2583,108 @@ def nunique(self, dropna=True):
index=self.grouper.result_index,
name=self.name)
+ def value_counts(self, normalize=False, sort=True, ascending=False,
+ bins=None, dropna=True):
+
+ from functools import partial
+ from pandas.tools.tile import cut
+ from pandas.tools.merge import _get_join_indexers
+
+ if bins is not None and not np.iterable(bins):
+ # scalar bins cannot be done at top level
+ # in a backward compatible way
+ return self.apply(Series.value_counts,
+ normalize=normalize,
+ sort=sort,
+ ascending=ascending,
+ bins=bins)
+
+ ids, _, _ = self.grouper.group_info
+ val = self.obj.get_values()
+
+ # groupby removes null keys from groupings
+ mask = ids != -1
+ ids, val = ids[mask], val[mask]
+
+ if bins is None:
+ lab, lev = algos.factorize(val, sort=True)
+ else:
+ cat, bins = cut(val, bins, retbins=True)
+ # bins[:-1] for backward compat;
+ # o.w. cat.categories could be better
+ lab, lev, dropna = cat.codes, bins[:-1], False
+
+ sorter = np.lexsort((lab, ids))
+ ids, lab = ids[sorter], lab[sorter]
+
+ # group boundries are where group ids change
+ idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
+
+ # new values are where sorted labels change
+ inc = np.r_[True, lab[1:] != lab[:-1]]
+ inc[idx] = True # group boundries are also new values
+ out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
+
+ # num. of times each group should be repeated
+ rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
+
+ # multi-index components
+ labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]]
+ levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
+ names = self.grouper.names + [self.name]
+
+ if dropna:
+ mask = labels[-1] != -1
+ if mask.all():
+ dropna = False
+ else:
+ out, labels = out[mask], [label[mask] for label in labels]
+
+ if normalize:
+ out = out.astype('float')
+ acc = rep(np.diff(np.r_[idx, len(ids)]))
+ out /= acc[mask] if dropna else acc
+
+ if sort and bins is None:
+ cat = ids[inc][mask] if dropna else ids[inc]
+ sorter = np.lexsort((out if ascending else -out, cat))
+ out, labels[-1] = out[sorter], labels[-1][sorter]
+
+ if bins is None:
+ mi = MultiIndex(levels=levels, labels=labels, names=names,
+ verify_integrity=False)
+
+ return Series(out, index=mi)
+
+ # for compat. with algos.value_counts need to ensure every
+ # bin is present at every index level, null filled with zeros
+ diff = np.zeros(len(out), dtype='bool')
+ for lab in labels[:-1]:
+ diff |= np.r_[True, lab[1:] != lab[:-1]]
+
+ ncat, nbin = diff.sum(), len(levels[-1])
+
+ left = [np.repeat(np.arange(ncat), nbin),
+ np.tile(np.arange(nbin), ncat)]
+
+ right = [diff.cumsum() - 1, labels[-1]]
+
+ _, idx = _get_join_indexers(left, right, sort=False, how='left')
+ out = np.where(idx != -1, out[idx], 0)
+
+ if sort:
+ sorter = np.lexsort((out if ascending else -out, left[0]))
+ out, left[-1] = out[sorter], left[-1][sorter]
+
+ # build the multi-index w/ full levels
+ labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
+ labels.append(left[-1])
+
+ mi = MultiIndex(levels=levels, labels=labels, names=names,
+ verify_integrity=False)
+
+ return Series(out, index=mi)
+
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index e51a13d3a296f..a8bbc372ebe25 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -1650,6 +1650,57 @@ def check_nunique(df, keys):
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
+ def test_series_groupby_value_counts(self):
+ from itertools import product
+
+ def rebuild_index(df):
+ arr = list(map(df.index.get_level_values, range(df.index.nlevels)))
+ df.index = MultiIndex.from_arrays(arr, names=df.index.names)
+ return df
+
+ def check_value_counts(df, keys, bins):
+ for isort, normalize, sort, ascending, dropna \
+ in product((False, True), repeat=5):
+
+ kwargs = dict(normalize=normalize, sort=sort,
+ ascending=ascending, dropna=dropna,
+ bins=bins)
+
+ gr = df.groupby(keys, sort=isort)
+ left = gr['3rd'].value_counts(**kwargs)
+
+ gr = df.groupby(keys, sort=isort)
+ right = gr['3rd'].apply(Series.value_counts, **kwargs)
+ right.index.names = right.index.names[:-1] + ['3rd']
+
+ # have to sort on index because of unstable sort on values
+ left, right = map(rebuild_index, (left, right)) # xref GH9212
+ assert_series_equal(left.sort_index(), right.sort_index())
+
+ def loop(df):
+ bins = None, np.arange(0, max(5, df['3rd'].max()) + 1, 2)
+ keys = '1st', '2nd', ('1st', '2nd')
+ for k, b in product(keys, bins):
+ check_value_counts(df, k, b)
+
+ days = date_range('2015-08-24', periods=10)
+
+ for n, m in product((100, 10000), (5, 20)):
+ frame = DataFrame({
+ '1st':np.random.choice(list('abcd'), n),
+ '2nd':np.random.choice(days, n),
+ '3rd':np.random.randint(1, m + 1, n)})
+
+ loop(frame)
+
+ frame.loc[1::11, '1st'] = nan
+ frame.loc[3::17, '2nd'] = nan
+ frame.loc[7::19, '3rd'] = nan
+ frame.loc[8::19, '3rd'] = nan
+ frame.loc[9::19, '3rd'] = nan
+
+ loop(frame)
+
def test_mulitindex_passthru(self):
# GH 7997
@@ -4944,7 +4995,6 @@ def test_groupby_whitelist(self):
'plot', 'hist',
'median', 'dtype',
'corr', 'cov',
- 'value_counts',
'diff',
'unique',
'nlargest', 'nsmallest',
| ``` -------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
groupby_ngroups_10000_value_counts | 11.4880 | 6698.5627 | 0.0017 |
groupby_ngroups_100_value_counts | 1.3397 | 69.8457 | 0.0192 |
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
Ratio < 1.0 means the target commit is faster then the baseline.
Seed used: 1234
Target [d587be8] : PERF: imporves performance in SeriesGroupBy.value_counts
Base [a3c4b59] : TST: pythonxs link seems to have changed in test_html.py, skip tests
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10909 | 2015-08-27T02:26:36Z | 2015-08-27T03:16:19Z | 2015-08-27T03:16:19Z | 2015-08-27T11:19:39Z |
updated link to online documentation | diff --git a/pandas/info.py b/pandas/info.py
index 754741c117289..57ecd91739eab 100644
--- a/pandas/info.py
+++ b/pandas/info.py
@@ -2,7 +2,7 @@
pandas - a powerful data analysis and manipulation library for Python
=====================================================================
-See http://pandas.sourceforge.net for full documentation. Otherwise, see the
+See http://pandas.pydata.org/ for full documentation. Otherwise, see the
docstrings of the various objects in the pandas namespace:
Series
| https://api.github.com/repos/pandas-dev/pandas/pulls/10896 | 2015-08-24T17:13:34Z | 2015-08-24T17:19:01Z | 2015-08-24T17:19:01Z | 2015-08-24T17:20:36Z | |
PERF: improves SeriesGroupBy.nunique performance | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index d30b7875e44b7..26fcbdca28ce7 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -679,6 +679,7 @@ Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- Added vbench benchmarks for alternative ExcelWriter engines and reading Excel files (:issue:`7171`)
- Performance improvements in ``Categorical.value_counts`` (:issue:`10804`)
+- Performance improvements in ``SeriesGroupBy.nunique`` (:issue:`10820`)
- 4x improvement in ``timedelta`` string parsing (:issue:`6755`, :issue:`10426`)
- 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index d23cb39c15548..220e67c43e4be 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -82,8 +82,7 @@
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
- frozenset(['dtype', 'value_counts', 'unique', 'nunique',
- 'nlargest', 'nsmallest'])
+ frozenset(['dtype', 'value_counts', 'unique', 'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
@@ -2558,6 +2557,32 @@ def true_and_notnull(x, *args, **kwargs):
filtered = self._apply_filter(indices, dropna)
return filtered
+ def nunique(self, dropna=True):
+ ids, _, _ = self.grouper.group_info
+ val = self.obj.get_values()
+
+ sorter = np.lexsort((val, ids))
+ ids, val = ids[sorter], val[sorter]
+
+ # group boundries are where group ids change
+ # unique observations are where sorted values change
+ idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
+ inc = np.r_[1, val[1:] != val[:-1]]
+
+ # 1st item of each group is a new unique observation
+ mask = isnull(val)
+ if dropna:
+ inc[idx] = 1
+ inc[mask] = 0
+ else:
+ inc[mask & np.r_[False, mask[:-1]]] = 0
+ inc[idx] = 1
+
+ out = np.add.reduceat(inc, idx)
+ return Series(out if ids[0] != -1 else out[1:],
+ index=self.grouper.result_index,
+ name=self.name)
+
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index fa2e6e911ab5e..afce4e682c0f9 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -1617,6 +1617,40 @@ def test_groupby_as_index_agg(self):
assert_frame_equal(left, right)
+ def test_series_groupby_nunique(self):
+ from itertools import product
+ from string import ascii_lowercase
+
+ def check_nunique(df, keys):
+ for sort, dropna in product((False, True), repeat=2):
+ gr = df.groupby(keys, sort=sort)
+ left = gr['julie'].nunique(dropna=dropna)
+
+ gr = df.groupby(keys, sort=sort)
+ right = gr['julie'].apply(Series.nunique, dropna=dropna)
+
+ assert_series_equal(left, right)
+
+ days = date_range('2015-08-23', periods=10)
+
+ for n, m in product(10**np.arange(2, 6), (10, 100, 1000)):
+ frame = DataFrame({
+ 'jim':np.random.choice(list(ascii_lowercase), n),
+ 'joe':np.random.choice(days, n),
+ 'julie':np.random.randint(0, m, n)})
+
+ check_nunique(frame, ['jim'])
+ check_nunique(frame, ['jim', 'joe'])
+
+ frame.loc[1::17, 'jim'] = None
+ frame.loc[3::37, 'joe'] = None
+ frame.loc[7::19, 'julie'] = None
+ frame.loc[8::19, 'julie'] = None
+ frame.loc[9::19, 'julie'] = None
+
+ check_nunique(frame, ['jim'])
+ check_nunique(frame, ['jim', 'joe'])
+
def test_mulitindex_passthru(self):
# GH 7997
@@ -4913,7 +4947,7 @@ def test_groupby_whitelist(self):
'corr', 'cov',
'value_counts',
'diff',
- 'unique', 'nunique',
+ 'unique',
'nlargest', 'nsmallest',
])
| closes https://github.com/pydata/pandas/issues/10820
on master:
``` ipython
In [2]: df = pd.DataFrame({'a': np.random.randint(10000, size=100000),
...: 'b': np.random.randint(10, size=100000)})
In [3]: %timeit df.groupby('a')['b'].nunique()
1 loops, best of 3: 1.66 s per loop
In [4]: %timeit df.groupby(['a', 'b'])['b'].first().groupby(level=0).size()
10 loops, best of 3: 36.3 ms per loop
```
on branch:
``` ipython
In [2]: %timeit df.groupby('a')['b'].nunique()
10 loops, best of 3: 29.2 ms per loop
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/10894 | 2015-08-24T00:42:38Z | 2015-08-24T18:33:56Z | 2015-08-24T18:33:56Z | 2015-11-11T16:27:48Z |
DEPR: Bunch o deprecation removals part 2 | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 19fe4e73f1f4d..fdbb5771aff3f 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -665,6 +665,7 @@ Deprecations
``DataFrame.add(other, fill_value=0)`` and ``DataFrame.mul(other, fill_value=1.)``
(:issue:`10735`).
- ``TimeSeries`` deprecated in favor of ``Series`` (note that this has been alias since 0.13.0), (:issue:`10890`)
+- ``WidePanel`` deprecated in favor of ``Panel``, ``LongPanel`` in favor of ``DataFrame`` (note these have been aliases since < 0.11.0), (:issue:`10892`)
.. _whatsnew_0170.prior_deprecations:
@@ -705,6 +706,15 @@ Removal of prior version deprecations/changes
df.add(df.A,axis='index')
+
+
+
+- Remove ``table`` keyword in ``HDFStore.put/append``, in favor of using ``format=`` (:issue:`4645`)
+- Remove ``kind`` in ``read_excel/ExcelFile`` as its unused (:issue:`4712`)
+- Remove ``infer_type`` keyword from ``pd.read_html`` as its unused (:issue:`4770`, :issue:`7032`)
+- Remove ``offset`` and ``timeRule`` keywords from ``Series.tshift/shift``, in favor of ``freq`` (:issue:`4853`, :issue:`4864`)
+- Remove ``pd.load/pd.save`` aliases in favor of ``pd.to_pickle/pd.read_pickle`` (:issue:`3787`)
+
.. _whatsnew_0170.performance:
Performance Improvements
diff --git a/pandas/core/api.py b/pandas/core/api.py
index fde9bc77c4bd9..e2ac57e37cba6 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -12,14 +12,12 @@
from pandas.core.series import Series, TimeSeries
from pandas.core.frame import DataFrame
-from pandas.core.panel import Panel
+from pandas.core.panel import Panel, WidePanel
from pandas.core.panel4d import Panel4D
from pandas.core.groupby import groupby
from pandas.core.reshape import (pivot_simple as pivot, get_dummies,
lreshape, wide_to_long)
-WidePanel = Panel
-
from pandas.core.indexing import IndexSlice
from pandas.tseries.offsets import DateOffset
from pandas.tseries.tools import to_datetime
@@ -29,7 +27,6 @@
from pandas.tseries.period import Period, PeriodIndex
# legacy
-from pandas.core.common import save, load # deprecated, remove in 0.13
import pandas.core.datetools as datetools
from pandas.core.config import (get_option, set_option, reset_option,
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 0d74a4449a5f5..245535e47abd8 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -3313,46 +3313,6 @@ def console_encode(object, **kwds):
return pprint_thing_encoded(object,
get_option("display.encoding"))
-
-def load(path): # TODO remove in 0.13
- """
- Load pickled pandas object (or any other pickled object) from the specified
- file path
-
- Warning: Loading pickled data received from untrusted sources can be
- unsafe. See: http://docs.python.org/2.7/library/pickle.html
-
- Parameters
- ----------
- path : string
- File path
-
- Returns
- -------
- unpickled : type of object stored in file
- """
- import warnings
- warnings.warn("load is deprecated, use read_pickle", FutureWarning)
- from pandas.io.pickle import read_pickle
- return read_pickle(path)
-
-
-def save(obj, path): # TODO remove in 0.13
- """
- Pickle (serialize) object to input file path
-
- Parameters
- ----------
- obj : any object
- path : string
- File path
- """
- import warnings
- warnings.warn("save is deprecated, use obj.to_pickle", FutureWarning)
- from pandas.io.pickle import to_pickle
- return to_pickle(obj, path)
-
-
def _maybe_match_name(a, b):
a_has = hasattr(a, 'name')
b_has = hasattr(b, 'name')
diff --git a/pandas/core/datetools.py b/pandas/core/datetools.py
index 6678baac1dae5..28cd97f437f29 100644
--- a/pandas/core/datetools.py
+++ b/pandas/core/datetools.py
@@ -41,23 +41,3 @@
isBusinessDay = BDay().onOffset
isMonthEnd = MonthEnd().onOffset
isBMonthEnd = BMonthEnd().onOffset
-
-
-def _resolve_offset(freq, kwds):
- if 'timeRule' in kwds or 'offset' in kwds:
- offset = kwds.get('offset', None)
- offset = kwds.get('timeRule', offset)
- if isinstance(offset, compat.string_types):
- offset = getOffset(offset)
- warn = True
- else:
- offset = freq
- warn = False
-
- if warn:
- import warnings
- warnings.warn("'timeRule' and 'offset' parameters are deprecated,"
- " please use 'freq' instead",
- FutureWarning)
-
- return offset
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a9979b4eb3810..0321082669a05 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2618,9 +2618,9 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
**kwargs)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
- def shift(self, periods=1, freq=None, axis=0, **kwargs):
+ def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
- axis=axis, **kwargs)
+ axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bc49e9dd79e6a..e54a6a6f11148 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1006,20 +1006,6 @@ def to_pickle(self, path):
from pandas.io.pickle import to_pickle
return to_pickle(self, path)
- def save(self, path): # TODO remove in 0.14
- "Deprecated. Use to_pickle instead"
- import warnings
- from pandas.io.pickle import to_pickle
- warnings.warn("save is deprecated, use to_pickle", FutureWarning)
- return to_pickle(self, path)
-
- def load(self, path): # TODO remove in 0.14
- "Deprecated. Use read_pickle instead."
- import warnings
- from pandas.io.pickle import read_pickle
- warnings.warn("load is deprecated, use pd.read_pickle", FutureWarning)
- return read_pickle(path)
-
def to_clipboard(self, excel=None, sep=None, **kwargs):
"""
Attempt to write text representation of object to the system clipboard
@@ -3806,15 +3792,15 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
shifted : %(klass)s
""")
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
- def shift(self, periods=1, freq=None, axis=0, **kwargs):
+ def shift(self, periods=1, freq=None, axis=0):
if periods == 0:
return self
block_axis = self._get_block_manager_axis(axis)
- if freq is None and not len(kwargs):
+ if freq is None:
new_data = self._data.shift(periods=periods, axis=block_axis)
else:
- return self.tshift(periods, freq, **kwargs)
+ return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
@@ -3854,7 +3840,7 @@ def slice_shift(self, periods=1, axis=0):
return new_obj.__finalize__(self)
- def tshift(self, periods=1, freq=None, axis=0, **kwargs):
+ def tshift(self, periods=1, freq=None, axis=0):
"""
Shift the time index, using the index's frequency if available
@@ -3877,7 +3863,6 @@ def tshift(self, periods=1, freq=None, axis=0, **kwargs):
-------
shifted : NDFrame
"""
- from pandas.core.datetools import _resolve_offset
index = self._get_axis(axis)
if freq is None:
@@ -3893,24 +3878,22 @@ def tshift(self, periods=1, freq=None, axis=0, **kwargs):
if periods == 0:
return self
- offset = _resolve_offset(freq, kwargs)
-
- if isinstance(offset, string_types):
- offset = datetools.to_offset(offset)
+ if isinstance(freq, string_types):
+ freq = datetools.to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
- orig_offset = datetools.to_offset(index.freq)
- if offset == orig_offset:
+ orig_freq = datetools.to_offset(index.freq)
+ if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = ('Given freq %s does not match PeriodIndex freq %s' %
- (offset.rule_code, orig_offset.rule_code))
+ (freq.rule_code, orig_freq.rule_code))
raise ValueError(msg)
else:
new_data = self._data.copy()
- new_data.axes[block_axis] = index.shift(periods, offset)
+ new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index bc342d5919bb8..d45422ecfa81d 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1210,8 +1210,8 @@ def shift(self, periods=1, freq=None, axis='major'):
return super(Panel, self).slice_shift(periods, axis=axis)
- def tshift(self, periods=1, freq=None, axis='major', **kwds):
- return super(Panel, self).tshift(periods, freq, axis, **kwds)
+ def tshift(self, periods=1, freq=None, axis='major'):
+ return super(Panel, self).tshift(periods, freq, axis)
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
@@ -1509,5 +1509,23 @@ def f(self, other, axis=0):
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
-WidePanel = Panel
-LongPanel = DataFrame
+# legacy
+class WidePanel(Panel):
+
+ def __init__(self, *args, **kwargs):
+
+ # deprecation, #10892
+ warnings.warn("WidePanel is deprecated. Please use Panel",
+ FutureWarning, stacklevel=2)
+
+ super(WidePanel, self).__init__(*args, **kwargs)
+
+class LongPanel(DataFrame):
+
+ def __init__(self, *args, **kwargs):
+
+ # deprecation, #10892
+ warnings.warn("LongPanel is deprecated. Please use DataFrame",
+ FutureWarning, stacklevel=2)
+
+ super(LongPanel, self).__init__(*args, **kwargs)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0c17104bb701e..185b6d2b74801 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2182,9 +2182,9 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
**kwargs)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
- def shift(self, periods=1, freq=None, axis=0, **kwargs):
+ def shift(self, periods=1, freq=None, axis=0):
return super(Series, self).shift(periods=periods, freq=freq,
- axis=axis, **kwargs)
+ axis=axis)
def reindex_axis(self, labels, axis=0, **kwargs):
""" for compatibility with higher dims """
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index d58d6590b96c0..d5258cb32e6e0 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -78,17 +78,17 @@ def read_excel(io, sheetname=0, **kwds):
and file. For file URLs, a host is expected. For instance, a local
file could be file://localhost/path/to/workbook.xlsx
sheetname : string, int, mixed list of strings/ints, or None, default 0
-
- Strings are used for sheet names, Integers are used in zero-indexed sheet
- positions.
-
+
+ Strings are used for sheet names, Integers are used in zero-indexed sheet
+ positions.
+
Lists of strings/integers are used to request multiple sheets.
-
+
Specify None to get all sheets.
-
+
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing sheets.
-
+
Available Cases
* Defaults to 0 -> 1st sheet as a DataFrame
@@ -143,11 +143,6 @@ def read_excel(io, sheetname=0, **kwds):
for more information on when a Dict of Dataframes is returned.
"""
- if 'kind' in kwds:
- kwds.pop('kind')
- warn("kind keyword is no longer supported in read_excel and may be "
- "removed in a future version", FutureWarning)
-
engine = kwds.pop('engine', None)
return ExcelFile(io, engine=engine).parse(sheetname=sheetname, **kwds)
@@ -207,19 +202,19 @@ def parse(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
Parameters
----------
sheetname : string, int, mixed list of strings/ints, or None, default 0
-
- Strings are used for sheet names, Integers are used in zero-indexed sheet
- positions.
-
+
+ Strings are used for sheet names, Integers are used in zero-indexed sheet
+ positions.
+
Lists of strings/integers are used to request multiple sheets.
-
+
Specify None to get all sheets.
-
+
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing sheets.
-
+
Available Cases
-
+
* Defaults to 0 -> 1st sheet as a DataFrame
* 1 -> 2nd sheet as a DataFrame
* "Sheet1" -> 1st sheet as a DataFrame
@@ -336,7 +331,7 @@ def _parse_excel(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
def _parse_cell(cell_contents,cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
-
+
if cell_typ == XL_CELL_DATE:
if xlrd_0_9_3:
# Use the newer xlrd datetime handling.
@@ -379,9 +374,9 @@ def _parse_cell(cell_contents,cell_typ):
xlrd_0_9_3 = True
else:
xlrd_0_9_3 = False
-
+
ret_dict = False
-
+
#Keep sheetname to maintain backwards compatibility.
if isinstance(sheetname, list):
sheets = sheetname
@@ -391,31 +386,31 @@ def _parse_cell(cell_contents,cell_typ):
ret_dict = True
else:
sheets = [sheetname]
-
+
#handle same-type duplicates.
sheets = list(set(sheets))
-
+
output = {}
-
+
for asheetname in sheets:
if verbose:
print("Reading sheet %s" % asheetname)
-
+
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
- else: # assume an integer if not a string
- sheet = self.book.sheet_by_index(asheetname)
-
+ else: # assume an integer if not a string
+ sheet = self.book.sheet_by_index(asheetname)
+
data = []
should_parse = {}
-
+
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(zip(sheet.row_values(i),
sheet.row_types(i))):
if parse_cols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, parse_cols)
-
+
if parse_cols is None or should_parse[j]:
row.append(_parse_cell(value,typ))
data.append(row)
@@ -436,14 +431,14 @@ def _parse_cell(cell_contents,cell_typ):
skip_footer=skip_footer,
chunksize=chunksize,
**kwds)
-
+
output[asheetname] = parser.read()
-
+
if ret_dict:
return output
else:
return output[asheetname]
-
+
@property
def sheet_names(self):
diff --git a/pandas/io/html.py b/pandas/io/html.py
index b806b5147c4a5..cb2ee7b1c1e3f 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -604,7 +604,7 @@ def _expand_elements(body):
body[ind] += empty * (lens_max - length)
-def _data_to_frame(data, header, index_col, skiprows, infer_types,
+def _data_to_frame(data, header, index_col, skiprows,
parse_dates, tupleize_cols, thousands):
head, body, foot = data
@@ -707,7 +707,7 @@ def _validate_flavor(flavor):
return flavor
-def _parse(flavor, io, match, header, index_col, skiprows, infer_types,
+def _parse(flavor, io, match, header, index_col, skiprows,
parse_dates, tupleize_cols, thousands, attrs, encoding):
flavor = _validate_flavor(flavor)
compiled_match = re.compile(match) # you can pass a compiled regex here
@@ -730,15 +730,20 @@ def _parse(flavor, io, match, header, index_col, skiprows, infer_types,
ret = []
for table in tables:
try:
- ret.append(_data_to_frame(table, header, index_col, skiprows,
- infer_types, parse_dates, tupleize_cols, thousands))
+ ret.append(_data_to_frame(data=table,
+ header=header,
+ index_col=index_col,
+ skiprows=skiprows,
+ parse_dates=parse_dates,
+ tupleize_cols=tupleize_cols,
+ thousands=thousands))
except StopIteration: # empty table
continue
return ret
def read_html(io, match='.+', flavor=None, header=None, index_col=None,
- skiprows=None, infer_types=None, attrs=None, parse_dates=False,
+ skiprows=None, attrs=None, parse_dates=False,
tupleize_cols=False, thousands=',', encoding=None):
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
@@ -776,9 +781,6 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None,
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
- infer_types : None, optional
- This has no effect since 0.15.0. It is here for backwards compatibility.
-
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
@@ -853,13 +855,11 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None,
pandas.read_csv
"""
_importers()
- if infer_types is not None:
- warnings.warn("infer_types has no effect since 0.15", FutureWarning)
# Type check here. We don't want to parse only to fail because of an
# invalid value of an integer skiprows.
if isinstance(skiprows, numbers.Integral) and skiprows < 0:
raise ValueError('cannot skip rows starting from the end of the '
'data (you passed a negative value)')
- return _parse(flavor, io, match, header, index_col, skiprows, infer_types,
+ return _parse(flavor, io, match, header, index_col, skiprows,
parse_dates, tupleize_cols, thousands, attrs, encoding)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index b23a183cdc145..dd02157e201d5 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1117,17 +1117,6 @@ def _validate_format(self, format, kwargs):
""" validate / deprecate formats; return the new kwargs """
kwargs = kwargs.copy()
- # table arg
- table = kwargs.pop('table', None)
-
- if table is not None:
- warnings.warn(format_deprecate_doc, FutureWarning)
-
- if table:
- format = 'table'
- else:
- format = 'fixed'
-
# validate
try:
kwargs['format'] = _FORMAT_MAP[format.lower()]
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py
index 9093df9f0bf62..921107859082d 100644
--- a/pandas/io/tests/test_html.py
+++ b/pandas/io/tests/test_html.py
@@ -137,12 +137,10 @@ def test_banklist(self):
assert_framelist_equal(df1, df2)
def test_spam_no_types(self):
- with tm.assert_produces_warning(FutureWarning):
- df1 = self.read_html(self.spam_data, '.*Water.*',
- infer_types=False)
- with tm.assert_produces_warning(FutureWarning):
- df2 = self.read_html(self.spam_data, 'Unit', infer_types=False)
+ # infer_types removed in #10892
+ df1 = self.read_html(self.spam_data, '.*Water.*')
+ df2 = self.read_html(self.spam_data, 'Unit')
assert_framelist_equal(df1, df2)
self.assertEqual(df1[0].ix[0, 0], 'Proximates')
@@ -230,12 +228,9 @@ def test_index(self):
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
- with tm.assert_produces_warning(FutureWarning):
- df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
- index_col=0, infer_types=False)
- with tm.assert_produces_warning(FutureWarning):
- df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0,
- infer_types=False)
+ df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
+ index_col=0)
+ df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
@@ -245,18 +240,10 @@ def test_header_and_index_with_types(self):
assert_framelist_equal(df1, df2)
def test_infer_types(self):
- with tm.assert_produces_warning(FutureWarning):
- df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0,
- infer_types=False)
- with tm.assert_produces_warning(FutureWarning):
- df2 = self.read_html(self.spam_data, 'Unit', index_col=0,
- infer_types=False)
- assert_framelist_equal(df1, df2)
-
- with tm.assert_produces_warning(FutureWarning):
- df2 = self.read_html(self.spam_data, 'Unit', index_col=0,
- infer_types=True)
+ # 10892 infer_types removed
+ df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
+ df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
@@ -641,8 +628,7 @@ def test_computer_sales_page(self):
with tm.assertRaisesRegexp(CParserError, r"Passed header=\[0,1\] are "
"too many rows for this multi_index "
"of columns"):
- with tm.assert_produces_warning(FutureWarning):
- self.read_html(data, infer_types=False, header=[0, 1])
+ self.read_html(data, header=[0, 1])
def test_wikipedia_states_table(self):
data = os.path.join(DATA_PATH, 'wikipedia_states.html')
@@ -751,8 +737,7 @@ def test_parse_dates_combine(self):
def test_computer_sales_page(self):
data = os.path.join(DATA_PATH, 'computer_sales_page.html')
- with tm.assert_produces_warning(FutureWarning):
- self.read_html(data, infer_types=False, header=[0, 1])
+ self.read_html(data, header=[0, 1])
def test_invalid_flavor():
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 210852d83094f..3a128fa3f247d 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -1040,7 +1040,7 @@ def test_append_all_nans(self):
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
- # Test to make sure defaults are to not drop.
+ # Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame({'col1':[0, np.nan, 2], 'col2':[1, np.nan, np.nan]})
@@ -1059,7 +1059,7 @@ def test_append_all_nans(self):
with ensure_clean_path(self.path) as path:
panel_with_missing.to_hdf(path, 'panel_with_missing', format='table')
- reloaded_panel = read_hdf(path, 'panel_with_missing')
+ reloaded_panel = read_hdf(path, 'panel_with_missing')
tm.assert_panel_equal(panel_with_missing, reloaded_panel)
def test_append_frame_column_oriented(self):
@@ -2440,9 +2440,9 @@ def test_terms(self):
p4d = tm.makePanel4D()
wpneg = Panel.fromDict({-1: tm.makeDataFrame(), 0: tm.makeDataFrame(),
1: tm.makeDataFrame()})
- store.put('wp', wp, table=True)
- store.put('p4d', p4d, table=True)
- store.put('wpneg', wpneg, table=True)
+ store.put('wp', wp, format='table')
+ store.put('p4d', p4d, format='table')
+ store.put('wpneg', wpneg, format='table')
# panel
result = store.select('wp', [Term(
@@ -2607,7 +2607,7 @@ def test_same_name_scoping(self):
import pandas as pd
df = DataFrame(np.random.randn(20, 2),index=pd.date_range('20130101',periods=20))
- store.put('df', df, table=True)
+ store.put('df', df, format='table')
expected = df[df.index>pd.Timestamp('20130105')]
import datetime
@@ -3608,7 +3608,7 @@ def test_frame_select_complex(self):
df.loc[df.index[0:4],'string'] = 'bar'
with ensure_clean_store(self.path) as store:
- store.put('df', df, table=True, data_columns=['string'])
+ store.put('df', df, format='table', data_columns=['string'])
# empty
result = store.select('df', 'index>df.index[3] & string="bar"')
@@ -3717,7 +3717,7 @@ def test_invalid_filtering(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
- store.put('df', df, table=True)
+ store.put('df', df, format='table')
# not implemented
self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A'] | columns=['B']")
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index 62d6a34655e1d..420cf509395ce 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -604,13 +604,10 @@ def dropna(self, axis=0, inplace=False, **kwargs):
dense_valid = dense_valid[dense_valid != self.fill_value]
return dense_valid.to_sparse(fill_value=self.fill_value)
- def shift(self, periods, freq=None, **kwds):
+ def shift(self, periods, freq=None):
"""
Analogous to Series.shift
"""
- from pandas.core.datetools import _resolve_offset
-
- offset = _resolve_offset(freq, kwds)
# no special handling of fill values yet
if not isnull(self.fill_value):
@@ -622,10 +619,10 @@ def shift(self, periods, freq=None, **kwds):
if periods == 0:
return self.copy()
- if offset is not None:
+ if freq is not None:
return self._constructor(self.sp_values,
sparse_index=self.sp_index,
- index=self.index.shift(periods, offset),
+ index=self.index.shift(periods, freq),
fill_value=self.fill_value).__finalize__(self)
int_index = self.sp_index.to_int_index()
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 6424a190dba9f..a429059c761d6 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5385,10 +5385,10 @@ def test_shift(self):
self.assertRaises(ValueError, ps.shift, freq='D')
# legacy support
- shifted4 = ps.shift(1, timeRule='B')
+ shifted4 = ps.shift(1, freq='B')
assert_series_equal(shifted2, shifted4)
- shifted5 = ps.shift(1, offset=datetools.bday)
+ shifted5 = ps.shift(1, freq=datetools.bday)
assert_series_equal(shifted5, shifted4)
# 32-bit taking
| - Remove the table keyword in HDFStore.put/append, in favor of using format= #4645
- Remove unused keyword `kind` in `read_excel/ExcelFile` #4712
- Remove `infer_type` keyword from `pd.read_html` as its unused, #4770, #7032
- Remove `offset` and `timeRule` keywords from `Series.tshift/shift`, in favor of `freq`, #4853, #4864
- Remove `pd.load/pd.save` aliases in favor of `pd.to_pickle/pd.read_pickle`, #3787
- Deprecate `WidePanel/LongPanel`
| https://api.github.com/repos/pandas-dev/pandas/pulls/10892 | 2015-08-23T12:44:39Z | 2015-08-24T18:24:19Z | 2015-08-24T18:24:19Z | 2015-08-24T18:24:19Z |
DEPR: Bunch o deprecation removals | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index d30b7875e44b7..7415ac01ada7a 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -37,6 +37,7 @@ Highlights include:
- Support for ``Series.dt.strftime`` to generate formatted strings for datetime-likes, see :ref:`here <whatsnew_0170.strftime>`
- Development installed versions of pandas will now have ``PEP440`` compliant version strings (:issue:`9518`)
- Support for reading SAS xport files, see :ref:`here <whatsnew_0170.enhancements.sas_xport>`
+- Removal of the automatic TimeSeries broadcasting, deprecated since 0.8.0, see :ref:`here <whatsnew_0170.prior_deprecations>`
Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsnew_0170.deprecations>` before updating.
@@ -663,6 +664,7 @@ Deprecations
can easily be replaced by using the ``add`` and ``mul`` methods:
``DataFrame.add(other, fill_value=0)`` and ``DataFrame.mul(other, fill_value=1.)``
(:issue:`10735`).
+- ``TimeSeries`` deprecated in favor of ``Series`` (note that this has been alias since 0.13.0), (:issue:`10890`)
.. _whatsnew_0170.prior_deprecations:
@@ -672,6 +674,36 @@ Removal of prior version deprecations/changes
- Remove use of some deprecated numpy comparison operations, mainly in tests. (:issue:`10569`)
- Removal of ``na_last`` parameters from ``Series.order()`` and ``Series.sort()``, in favor of ``na_position``, xref (:issue:`5231`)
- Remove of ``percentile_width`` from ``.describe()``, in favor of ``percentiles``. (:issue:`7088`)
+- Removal of ``colSpace`` parameter from ``DataFrame.to_string()``, in favor of ``col_space``, circa 0.8.0 version.
+- Removal of automatic time-series broadcasting (:issue:`2304`)
+
+ .. ipython :: python
+
+ np.random.seed(1234)
+ df = DataFrame(np.random.randn(5,2),columns=list('AB'),index=date_range('20130101',periods=5))
+ df
+
+ Previously
+
+ .. code-block:: python
+
+ In [3]: df + df.A
+ FutureWarning: TimeSeries broadcasting along DataFrame index by default is deprecated.
+ Please use DataFrame.<op> to explicitly broadcast arithmetic operations along the index
+
+ Out[3]:
+ A B
+ 2013-01-01 0.942870 -0.719541
+ 2013-01-02 2.865414 1.120055
+ 2013-01-03 -1.441177 0.166574
+ 2013-01-04 1.719177 0.223065
+ 2013-01-05 0.031393 -2.226989
+
+ Current
+
+ .. ipython :: python
+
+ df.add(df.A,axis='index')
.. _whatsnew_0170.performance:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1f222f9f99cbe..a9979b4eb3810 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1396,7 +1396,7 @@ def to_stata(
writer.write_file()
@Appender(fmt.docstring_to_string, indents=1)
- def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,
+ def to_string(self, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, line_width=None, max_rows=None, max_cols=None,
@@ -1405,11 +1405,6 @@ def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,
Render a DataFrame to a console-friendly tabular output.
"""
- if colSpace is not None: # pragma: no cover
- warnings.warn("colSpace is deprecated, use col_space",
- FutureWarning)
- col_space = colSpace
-
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
@@ -3359,16 +3354,7 @@ def _combine_series_infer(self, other, func, level=None, fill_value=None):
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
- # teeny hack because one does DataFrame + TimeSeries all the time
- if self.index.is_all_dates and other.index.is_all_dates:
- warnings.warn(("TimeSeries broadcasting along DataFrame index "
- "by default is deprecated. Please use "
- "DataFrame.<op> to explicitly broadcast arithmetic "
- "operations along the index"),
- FutureWarning)
- return self._combine_match_index(other, func, level=level, fill_value=fill_value)
- else:
- return self._combine_match_columns(other, func, level=level, fill_value=fill_value)
+ return self._combine_match_columns(other, func, level=level, fill_value=fill_value)
def _combine_match_index(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=0, level=level, copy=False)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 8768d0e139e7b..0c17104bb701e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -261,6 +261,7 @@ def _set_axis(self, axis, labels, fastpath=False):
is_all_dates = labels.is_all_dates
if is_all_dates:
+
if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
labels = DatetimeIndex(labels)
@@ -2779,7 +2780,14 @@ def _try_cast(arr, take_fast_path):
return subarr
# backwards compatiblity
-TimeSeries = Series
+class TimeSeries(Series):
+
+ def __init__(self, *args, **kwargs):
+ # deprecation TimeSeries, #10890
+ warnings.warn("TimeSeries is deprecated. Please use Series",
+ FutureWarning, stacklevel=2)
+
+ super(TimeSeries, self).__init__(*args, **kwargs)
#----------------------------------------------------------------------
# Add plotting methods to Series
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 8ef6363f836ae..b23a183cdc145 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -13,7 +13,8 @@
import os
import numpy as np
-from pandas import (Series, TimeSeries, DataFrame, Panel, Panel4D, Index,
+import pandas as pd
+from pandas import (Series, DataFrame, Panel, Panel4D, Index,
MultiIndex, Int64Index, Timestamp)
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
@@ -164,7 +165,7 @@ class DuplicateWarning(Warning):
Series: u('series'),
SparseSeries: u('sparse_series'),
- TimeSeries: u('series'),
+ pd.TimeSeries: u('series'),
DataFrame: u('frame'),
SparseDataFrame: u('sparse_frame'),
Panel: u('wide'),
diff --git a/pandas/io/tests/generate_legacy_storage_files.py b/pandas/io/tests/generate_legacy_storage_files.py
index 86c5a9e0d7f19..0ca5ced1b8d1a 100644
--- a/pandas/io/tests/generate_legacy_storage_files.py
+++ b/pandas/io/tests/generate_legacy_storage_files.py
@@ -1,8 +1,8 @@
""" self-contained to write legacy storage (pickle/msgpack) files """
from __future__ import print_function
from distutils.version import LooseVersion
-from pandas import (Series, TimeSeries, DataFrame, Panel,
- SparseSeries, SparseTimeSeries, SparseDataFrame, SparsePanel,
+from pandas import (Series, DataFrame, Panel,
+ SparseSeries, SparseDataFrame, SparsePanel,
Index, MultiIndex, PeriodIndex, bdate_range, to_msgpack,
date_range, period_range, bdate_range, Timestamp, Categorical,
Period)
@@ -36,7 +36,7 @@ def _create_sp_tsseries():
arr[-1:] = nan
date_index = bdate_range('1/1/2011', periods=len(arr))
- bseries = SparseTimeSeries(arr, index=date_index, kind='block')
+ bseries = SparseSeries(arr, index=date_index, kind='block')
bseries.name = 'btsseries'
return bseries
@@ -78,7 +78,7 @@ def create_data():
series = dict(float=Series(data['A']),
int=Series(data['B']),
mixed=Series(data['E']),
- ts=TimeSeries(np.arange(10).astype(np.int64), index=date_range('20130101',periods=10)),
+ ts=Series(np.arange(10).astype(np.int64), index=date_range('20130101',periods=10)),
mi=Series(np.arange(5).astype(np.float64),
index=MultiIndex.from_tuples(tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])),
names=['one', 'two'])),
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index a8addfab17c26..62d6a34655e1d 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -7,7 +7,7 @@
from numpy import nan, ndarray
import numpy as np
-
+import warnings
import operator
from pandas.core.common import isnull, _values_from_object, _maybe_match_name
@@ -770,4 +770,11 @@ def from_coo(cls, A, dense_index=False):
bool_method=None, use_numexpr=False, force=True)
# backwards compatiblity
-SparseTimeSeries = SparseSeries
+class SparseTimeSeries(SparseSeries):
+
+ def __init__(self, *args, **kwargs):
+ # deprecation TimeSeries, #10890
+ warnings.warn("SparseTimeSeries is deprecated. Please use SparseSeries",
+ FutureWarning, stacklevel=2)
+
+ super(SparseTimeSeries, self).__init__(*args, **kwargs)
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index 103f3992f950a..8d24025f3c3cf 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -30,7 +30,7 @@
import pandas.sparse.frame as spf
from pandas._sparse import BlockIndex, IntIndex
-from pandas.sparse.api import (SparseSeries, SparseTimeSeries,
+from pandas.sparse.api import (SparseSeries,
SparseDataFrame, SparsePanel,
SparseArray)
import pandas.tests.test_frame as test_frame
@@ -160,6 +160,12 @@ def test_iteration_and_str(self):
[x for x in self.bseries]
str(self.bseries)
+ def test_TimeSeries_deprecation(self):
+
+ # deprecation TimeSeries, #10890
+ with tm.assert_produces_warning(FutureWarning):
+ pd.SparseTimeSeries(1,index=pd.date_range('20130101',periods=3))
+
def test_construct_DataFrame_with_sp_series(self):
# it works!
df = DataFrame({'col': self.bseries})
@@ -258,7 +264,7 @@ def _check_const(sparse, name):
# Sparse time series works
date_index = bdate_range('1/1/2000', periods=len(self.bseries))
s5 = SparseSeries(self.bseries, index=date_index)
- tm.assertIsInstance(s5, SparseTimeSeries)
+ tm.assertIsInstance(s5, SparseSeries)
# pass Series
bseries2 = SparseSeries(self.bseries.to_dense())
@@ -1189,14 +1195,19 @@ def _compare_to_dense(a, b, da, db, op):
frame['A'].reindex(fidx[::2]),
SparseSeries([], index=[])]
- for op in ops:
+ for op in opnames:
_compare_to_dense(frame, frame[::2], frame.to_dense(),
- frame[::2].to_dense(), op)
+ frame[::2].to_dense(), getattr(operator, op))
+
+ # 2304, no auto-broadcasting
for i, s in enumerate(series):
+ f = lambda a, b: getattr(a,op)(b,axis='index')
_compare_to_dense(frame, s, frame.to_dense(),
- s.to_dense(), op)
- _compare_to_dense(s, frame, s.to_dense(),
- frame.to_dense(), op)
+ s.to_dense(), f)
+
+ # rops are not implemented
+ #_compare_to_dense(s, frame, s.to_dense(),
+ # frame.to_dense(), f)
# cross-sectional operations
series = [frame.xs(fidx[0]),
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 022594e296c2a..9687d9b742126 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6039,46 +6039,47 @@ def test_combineSeries(self):
#added = self.mixed_int + (100*series).astype('int32')
#_check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C = 'int32', D = 'int64'))
- # TimeSeries
- buf = StringIO()
- tmp = sys.stderr
- sys.stderr = buf
- try:
- ts = self.tsframe['A']
- added = self.tsframe + ts
-
- for key, col in compat.iteritems(self.tsframe):
- result = col + ts
- assert_series_equal(added[key], result, check_names=False)
- self.assertEqual(added[key].name, key)
- if col.name == ts.name:
- self.assertEqual(result.name, 'A')
- else:
- self.assertTrue(result.name is None)
+ # TimeSeries
+ ts = self.tsframe['A']
+
+ # 10890
+ # we no longer allow auto timeseries broadcasting
+ # and require explict broadcasting
+ added = self.tsframe.add(ts, axis='index')
+
+ for key, col in compat.iteritems(self.tsframe):
+ result = col + ts
+ assert_series_equal(added[key], result, check_names=False)
+ self.assertEqual(added[key].name, key)
+ if col.name == ts.name:
+ self.assertEqual(result.name, 'A')
+ else:
+ self.assertTrue(result.name is None)
- smaller_frame = self.tsframe[:-5]
- smaller_added = smaller_frame + ts
+ smaller_frame = self.tsframe[:-5]
+ smaller_added = smaller_frame.add(ts, axis='index')
- self.assertTrue(smaller_added.index.equals(self.tsframe.index))
+ self.assertTrue(smaller_added.index.equals(self.tsframe.index))
- smaller_ts = ts[:-5]
- smaller_added2 = self.tsframe + smaller_ts
- assert_frame_equal(smaller_added, smaller_added2)
+ smaller_ts = ts[:-5]
+ smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
+ assert_frame_equal(smaller_added, smaller_added2)
- # length 0
- result = self.tsframe + ts[:0]
+ # length 0, result is all-nan
+ result = self.tsframe.add(ts[:0], axis='index')
+ expected = DataFrame(np.nan,index=self.tsframe.index,columns=self.tsframe.columns)
+ assert_frame_equal(result, expected)
- # Frame is length 0
- result = self.tsframe[:0] + ts
- self.assertEqual(len(result), 0)
+ # Frame is all-nan
+ result = self.tsframe[:0].add(ts, axis='index')
+ expected = DataFrame(np.nan,index=self.tsframe.index,columns=self.tsframe.columns)
+ assert_frame_equal(result, expected)
- # empty but with non-empty index
- frame = self.tsframe[:1].reindex(columns=[])
- result = frame * ts
- self.assertEqual(len(result), len(ts))
- finally:
- sys.stderr = tmp
+ # empty but with non-empty index
+ frame = self.tsframe[:1].reindex(columns=[])
+ result = frame.mul(ts,axis='index')
+ self.assertEqual(len(result), len(ts))
def test_combineFunc(self):
result = self.frame * 2
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index fa2e6e911ab5e..d1073b6c4d7ab 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -502,9 +502,8 @@ def test_groupby_bounds_check(self):
self.assertRaises(AssertionError, pd.algos.groupby_object,a, b)
def test_groupby_grouper_f_sanity_checked(self):
- import pandas as pd
dates = date_range('01-Jan-2013', periods=12, freq='MS')
- ts = pd.TimeSeries(np.random.randn(12), index=dates)
+ ts = Series(np.random.randn(12), index=dates)
# GH3035
# index.map is used to apply grouper to the index
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 3567c98e71bce..6424a190dba9f 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -666,6 +666,12 @@ def test_astype(self):
self.assertEqual(astyped.dtype, dtype)
self.assertEqual(astyped.name, s.name)
+ def test_TimeSeries_deprecation(self):
+
+ # deprecation TimeSeries, #10890
+ with tm.assert_produces_warning(FutureWarning):
+ pd.TimeSeries(1,index=date_range('20130101',periods=3))
+
def test_constructor(self):
# Recognize TimeSeries
self.assertTrue(self.ts.is_time_series)
@@ -4515,10 +4521,10 @@ def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A': self.ts})
- tm.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
- tm.assert_almost_equal(self.ts ** self.ts, (self.ts ** df)['A'])
- tm.assert_almost_equal(self.ts < self.ts, (self.ts < df)['A'])
- tm.assert_almost_equal(self.ts / self.ts, (self.ts / df)['A'])
+ tm.assert_almost_equal(self.ts + self.ts, self.ts + df['A'])
+ tm.assert_almost_equal(self.ts ** self.ts, self.ts ** df['A'])
+ tm.assert_almost_equal(self.ts < self.ts, self.ts < df['A'])
+ tm.assert_almost_equal(self.ts / self.ts, self.ts / df['A'])
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 6a9ad175f42dd..7886a63c6df46 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -15,7 +15,7 @@
from pandas.compat import range, lrange, u, unichr
import pandas.compat as compat
-from pandas import (Index, Series, TimeSeries, DataFrame, isnull, notnull,
+from pandas import (Index, Series, DataFrame, isnull, notnull,
bdate_range, date_range, MultiIndex)
import pandas.core.common as com
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index eb5c6759bfa45..e0434bfec3be4 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -24,7 +24,7 @@
from numpy.random import randn
from pandas.compat import range, lrange, lmap, zip
-from pandas import Series, TimeSeries, DataFrame, _np_version_under1p9
+from pandas import Series, DataFrame, _np_version_under1p9
from pandas import tslib
from pandas.util.testing import(assert_series_equal, assert_almost_equal,
assertRaisesRegexp)
@@ -1191,7 +1191,7 @@ def test_hash_error(self):
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
- tm.assertIsInstance(series, TimeSeries)
+ tm.assertIsInstance(series, Series)
def test_astype(self):
idx = period_range('1990', '2009', freq='A')
diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py
index 4b3085dc8259f..7dafc88bf9239 100644
--- a/pandas/tseries/tests/test_resample.py
+++ b/pandas/tseries/tests/test_resample.py
@@ -980,7 +980,7 @@ def _simple_ts(start, end, freq='D'):
def _simple_pts(start, end, freq='D'):
rng = period_range(start, end, freq=freq)
- return TimeSeries(np.random.randn(len(rng)), index=rng)
+ return Series(np.random.randn(len(rng)), index=rng)
class TestResamplePeriodIndex(tm.TestCase):
@@ -1177,7 +1177,7 @@ def test_resample_to_quarterly(self):
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
- s = TimeSeries(np.random.randn(4), index=rng)
+ s = Series(np.random.randn(4), index=rng)
stamps = s.to_timestamp()
@@ -1191,12 +1191,12 @@ def test_resample_fill_missing(self):
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
- s = TimeSeries(np.random.randn(5), index=rng)
+ s = Series(np.random.randn(5), index=rng)
self.assertRaises(Exception, s.resample, 'A')
def test_resample_5minute(self):
rng = period_range('1/1/2000', '1/5/2000', freq='T')
- ts = TimeSeries(np.random.randn(len(rng)), index=rng)
+ ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('5min')
expected = ts.to_timestamp().resample('5min')
@@ -1402,7 +1402,7 @@ def test_evenly_divisible_with_no_extra_bins(self):
'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 +
[{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10,
'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28,
- index=index.append(index)).sort()
+ index=index.append(index)).sort_index()
index = date_range('2001-5-4',periods=4,freq='7D')
expected = DataFrame(
@@ -1430,7 +1430,7 @@ def test_apply(self):
grouped = self.ts.groupby(grouper)
- f = lambda x: x.order()[-3:]
+ f = lambda x: x.sort_values()[-3:]
applied = grouped.apply(f)
expected = self.ts.groupby(lambda x: x.year).apply(f)
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index e02973136863d..f416a8939ac82 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -9,7 +9,7 @@
import numpy as np
randn = np.random.randn
-from pandas import (Index, Series, TimeSeries, DataFrame,
+from pandas import (Index, Series, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index, TimedeltaIndex, NaT)
@@ -60,7 +60,7 @@ def setUp(self):
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
- tm.assertIsInstance(self.dups, TimeSeries)
+ tm.assertIsInstance(self.dups, Series)
tm.assertIsInstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
diff --git a/pandas/tseries/tests/test_timeseries_legacy.py b/pandas/tseries/tests/test_timeseries_legacy.py
index 6889f8e2afbb2..4cbc171364ee6 100644
--- a/pandas/tseries/tests/test_timeseries_legacy.py
+++ b/pandas/tseries/tests/test_timeseries_legacy.py
@@ -8,7 +8,7 @@
import numpy as np
randn = np.random.randn
-from pandas import (Index, Series, TimeSeries, DataFrame,
+from pandas import (Index, Series, DataFrame,
isnull, date_range, Timestamp, DatetimeIndex,
Int64Index, to_datetime, bdate_range)
diff --git a/pandas/tseries/util.py b/pandas/tseries/util.py
index 6c534de0a7aaa..4f29b2bf31f83 100644
--- a/pandas/tseries/util.py
+++ b/pandas/tseries/util.py
@@ -28,7 +28,7 @@ def pivot_annual(series, freq=None):
Parameters
----------
- series : TimeSeries
+ series : Series
freq : string or None, default None
Returns
| xref #6581
remove `colSpace`
remove auto time series broadcasting, xref #2304
deprecate `TimeSeries` (forgot was not actually deprecated)
| https://api.github.com/repos/pandas-dev/pandas/pulls/10890 | 2015-08-22T21:50:42Z | 2015-08-24T18:22:38Z | 2015-08-24T18:22:38Z | 2015-08-24T18:22:38Z |
BUG: encoding of categoricals in hdf serialization | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 9049d8de550d0..c18bedd0cf6eb 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -818,7 +818,7 @@ Bug Fixes
- Bug in ``read_csv`` when using the ``nrows`` or ``chunksize`` parameters if file contains only a header line (:issue:`9535`)
-
+- Bug in serialization of ``category`` types in HDF5 in presence of alternate encodings. (:issue:`10366`)
- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`)
- Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue:`9431`)
- Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index dd02157e201d5..ea0a59ce2ab31 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3039,7 +3039,8 @@ def write_metadata(self, key, values):
"""
values = Series(values)
- self.parent.put(self._get_metadata_path(key), values, format='table')
+ self.parent.put(self._get_metadata_path(key), values, format='table',
+ encoding=self.encoding, nan_rep=self.nan_rep)
def read_metadata(self, key):
""" return the meta data array for this key """
@@ -4389,11 +4390,23 @@ def _unconvert_index_legacy(data, kind, legacy=False, encoding=None):
def _convert_string_array(data, encoding, itemsize=None):
+ """
+ we take a string-like that is object dtype and coerce to a fixed size string type
+
+ Parameters
+ ----------
+ data : a numpy array of object dtype
+ encoding : None or string-encoding
+ itemsize : integer, optional, defaults to the max length of the strings
+
+ Returns
+ -------
+ data in a fixed-length string dtype, encoded to bytes if needed
+ """
# encode if needed
if encoding is not None and len(data):
- f = np.vectorize(lambda x: x.encode(encoding), otypes=[np.object])
- data = f(data)
+ data = Series(data.ravel()).str.encode(encoding).values.reshape(data.shape)
# create the sized dtype
if itemsize is None:
@@ -4403,7 +4416,20 @@ def _convert_string_array(data, encoding, itemsize=None):
return data
def _unconvert_string_array(data, nan_rep=None, encoding=None):
- """ deserialize a string array, possibly decoding """
+ """
+ inverse of _convert_string_array
+
+ Parameters
+ ----------
+ data : fixed length string dtyped array
+ nan_rep : the storage repr of NaN, optional
+ encoding : the encoding of the data, optional
+
+ Returns
+ -------
+ an object array of the decoded data
+
+ """
shape = data.shape
data = np.asarray(data.ravel(), dtype=object)
@@ -4412,16 +4438,16 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None):
encoding = _ensure_encoding(encoding)
if encoding is not None and len(data):
- try:
- itemsize = lib.max_len_string_array(com._ensure_object(data.ravel()))
- if compat.PY3:
- dtype = "U{0}".format(itemsize)
- else:
- dtype = "S{0}".format(itemsize)
+ itemsize = lib.max_len_string_array(com._ensure_object(data))
+ if compat.PY3:
+ dtype = "U{0}".format(itemsize)
+ else:
+ dtype = "S{0}".format(itemsize)
+
+ if isinstance(data[0], compat.binary_type):
+ data = Series(data).str.decode(encoding).values
+ else:
data = data.astype(dtype, copy=False).astype(object, copy=False)
- except (Exception) as e:
- f = np.vectorize(lambda x: x.decode(encoding), otypes=[np.object])
- data = f(data)
if nan_rep is None:
nan_rep = 'nan'
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 3a128fa3f247d..b4f1e6a429198 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -930,6 +930,51 @@ def test_encoding(self):
result = store.select('df',Term('columns=A',encoding='ascii'))
tm.assert_frame_equal(result,expected)
+ def test_latin_encoding(self):
+
+ if compat.PY2:
+ self.assertRaisesRegexp(TypeError, '\[unicode\] is not implemented as a table column')
+ return
+
+ values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
+ [b'E\xc9, 17', b'a', b'b', b'c'],
+ [b'EE, 17', b'', b'a', b'b', b'c'],
+ [b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
+ [b'', b'a', b'b', b'c'],
+ [b'\xf8\xfc', b'a', b'b', b'c'],
+ [b'A\xf8\xfc', b'', b'a', b'b', b'c'],
+ [np.nan, b'', b'b', b'c'],
+ [b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
+
+ def _try_decode(x, encoding='latin-1'):
+ try:
+ return x.decode(encoding)
+ except AttributeError:
+ return x
+ # not sure how to remove latin-1 from code in python 2 and 3
+ values = [[_try_decode(x) for x in y] for y in values]
+
+ examples = []
+ for dtype in ['category', object]:
+ for val in values:
+ examples.append(pandas.Series(val, dtype=dtype))
+
+ def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):
+ with ensure_clean_path(self.path) as store:
+ s.to_hdf(store, key, format='table', encoding=encoding,
+ nan_rep=nan_rep)
+ retr = read_hdf(store, key)
+ s_nan = s.replace(nan_rep, np.nan)
+ assert_series_equal(s_nan, retr)
+
+ for s in examples:
+ roundtrip(s)
+
+ # fails:
+ # for x in examples:
+ # roundtrip(s, nan_rep=b'\xf8\xfc')
+
+
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
| closes #10366
replaces #10454
| https://api.github.com/repos/pandas-dev/pandas/pulls/10889 | 2015-08-22T20:20:30Z | 2015-08-28T02:29:04Z | 2015-08-28T02:29:03Z | 2015-08-28T02:29:10Z |
BUG: GH10885 where an edge case in date_range produces an extra point | diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index d30b7875e44b7..cc8f135eb62b0 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -781,6 +781,7 @@ Bug Fixes
- Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`)
- Bug in ``Series([np.nan]).astype('M8[ms]')``, which now returns ``Series([pd.NaT])`` (:issue:`10747`)
- Bug in ``PeriodIndex.order`` reset freq (:issue:`10295`)
+- Bug in ``date_range`` when ``freq`` divides ``end`` as nanos (:issue:`10885`)
- Bug in ``iloc`` allowing memory outside bounds of a Series to be accessed with negative integers (:issue:`10779`)
- Bug in ``read_msgpack`` where encoding is not respected (:issue:`10580`)
- Bug preventing access to the first index when using ``iloc`` with a list containing the appropriate negative integer (:issue:`10547`, :issue:`10779`)
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 0525a29ef3fd0..c6c66a62b86b5 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1,4 +1,5 @@
# pylint: disable=E1101
+from __future__ import division
import operator
import warnings
from datetime import time, datetime
@@ -1793,8 +1794,9 @@ def _generate_regular_range(start, end, periods, offset):
stride = offset.nanos
if periods is None:
b = Timestamp(start).value
- e = Timestamp(end).value
- e += stride - e % stride
+ # cannot just use e = Timestamp(end) + 1 because arange breaks when
+ # stride is too large, see GH10887
+ e = b + (Timestamp(end).value - b)//stride * stride + stride//2
# end.tz == start.tz by this point due to _generate implementation
tz = start.tz
elif start is not None:
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index 86e0f7162c545..42136c3433977 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -490,6 +490,18 @@ def test_years_only(self):
self.assertEqual(dr[0], datetime(2014, 1, 31))
self.assertEqual(dr[-1], datetime(2014, 12, 31))
+ def test_freq_divides_end_in_nanos(self):
+ # GH 10885
+ result_1 = date_range('2005-01-12 10:00', '2005-01-12 16:00',
+ freq='345min')
+ result_2 = date_range('2005-01-13 10:00', '2005-01-13 16:00',
+ freq='345min')
+ expected_1 = DatetimeIndex(['2005-01-12 10:00:00', '2005-01-12 15:45:00'],
+ dtype='datetime64[ns]', freq='345T', tz=None)
+ expected_2 = DatetimeIndex(['2005-01-13 10:00:00', '2005-01-13 15:45:00'],
+ dtype='datetime64[ns]', freq='345T', tz=None)
+ self.assertTrue(result_1.equals(expected_1))
+ self.assertTrue(result_2.equals(expected_2))
class TestCustomDateRange(tm.TestCase):
| closes #10885
| https://api.github.com/repos/pandas-dev/pandas/pulls/10887 | 2015-08-22T14:48:11Z | 2015-08-24T11:37:59Z | 2015-08-24T11:37:58Z | 2015-08-24T13:59:27Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.