title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
TST/BLD: test 3.6 on appveyor
diff --git a/appveyor.yml b/appveyor.yml index a8e5218ab2c9f..33b8be57eba62 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -17,20 +17,19 @@ environment: matrix: - # disable python 3.4 ATM - #- PYTHON: "C:\\Python34_64" - # PYTHON_VERSION: "3.4" - # PYTHON_ARCH: "64" - # CONDA_PY: "34" - # CONDA_NPY: "19" - - - PYTHON: "C:\\Python27_64" + - CONDA_ROOT: "C:\\Miniconda3.5_64" + PYTHON_VERSION: "3.6" + PYTHON_ARCH: "64" + CONDA_PY: "36" + CONDA_NPY: "111" + + - CONDA_ROOT: "C:\\Miniconda3.5_64" PYTHON_VERSION: "2.7" PYTHON_ARCH: "64" CONDA_PY: "27" CONDA_NPY: "110" - - PYTHON: "C:\\Python35_64" + - CONDA_ROOT: "C:\\Miniconda3.5_64" PYTHON_VERSION: "3.5" PYTHON_ARCH: "64" CONDA_PY: "35" @@ -45,9 +44,6 @@ platform: # all our python builds have to happen in tests_script... build: false -init: - - "ECHO %PYTHON_VERSION% %PYTHON%" - install: # cancel older builds for the same PR - ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod ` @@ -58,7 +54,7 @@ install: # this installs the appropriate Miniconda (Py2/Py3, 32/64 bit) # updates conda & installs: conda-build jinja2 anaconda-client - powershell .\ci\install.ps1 - - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH% + - SET PATH=%CONDA_ROOT%;%CONDA_ROOT%\Scripts;%PATH% - echo "install" - cd - ls -ltr @@ -70,13 +66,6 @@ install: # install our build environment - cmd: conda config --set show_channel_urls true --set always_yes true --set changeps1 false - cmd: conda update -q conda - - # fix conda-build version - # https://github.com/conda/conda-build/issues/1001 - # disabling 3.4 as windows complains upon compiling byte - # code - - - cmd: conda install conda-build=1.21.7 - cmd: conda config --set ssl_verify false # add the pandas channel *before* defaults to have defaults take priority @@ -84,7 +73,6 @@ install: - cmd: conda config --add channels pandas - cmd: conda config --remove channels defaults - cmd: conda config --add channels defaults - - cmd: conda install anaconda-client # 
this is now the downloaded conda... - cmd: conda info -a @@ -98,6 +86,8 @@ install: - SET REQ=ci\requirements-%PYTHON_VERSION%-%PYTHON_ARCH%.run - cmd: echo "installing requirements from %REQ%" - cmd: conda install -n pandas -q --file=%REQ% + - cmd: conda list -n pandas + - cmd: echo "installing requirements from %REQ% - done" - ps: conda install -n pandas (conda build ci\appveyor.recipe -q --output) test_script: diff --git a/ci/appveyor.recipe/meta.yaml b/ci/appveyor.recipe/meta.yaml index 6bf0a14bc7d0b..777fd9d682d48 100644 --- a/ci/appveyor.recipe/meta.yaml +++ b/ci/appveyor.recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: pandas - version: 0.18.1 + version: 0.20.0 build: number: {{environ.get('APPVEYOR_BUILD_NUMBER', 0)}} # [win] diff --git a/ci/install.ps1 b/ci/install.ps1 index 16c92dc76d273..64ec7f81884cd 100644 --- a/ci/install.ps1 +++ b/ci/install.ps1 @@ -84,9 +84,9 @@ function UpdateConda ($python_home) { function main () { - InstallMiniconda $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON - UpdateConda $env:PYTHON - InstallCondaPackages $env:PYTHON "conda-build jinja2 anaconda-client" + InstallMiniconda "3.5" $env:PYTHON_ARCH $env:CONDA_ROOT + UpdateConda $env:CONDA_ROOT + InstallCondaPackages $env:CONDA_ROOT "conda-build jinja2 anaconda-client" } main diff --git a/ci/install_appveyor.ps1 b/ci/install_appveyor.ps1 deleted file mode 100644 index a022995dc7d58..0000000000000 --- a/ci/install_appveyor.ps1 +++ /dev/null @@ -1,133 +0,0 @@ -# Sample script to install Miniconda under Windows -# Authors: Olivier Grisel, Jonathan Helmus and Kyle Kastner, Robert McGibbon -# License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/ - -$MINICONDA_URL = "http://repo.continuum.io/miniconda/" - - -function DownloadMiniconda ($python_version, $platform_suffix) { - $webclient = New-Object System.Net.WebClient - if ($python_version -match "3.4") { - $filename = "Miniconda3-3.5.5-Windows-" + $platform_suffix + ".exe" - } else { - $filename = 
"Miniconda-3.5.5-Windows-" + $platform_suffix + ".exe" - } - $url = $MINICONDA_URL + $filename - - $basedir = $pwd.Path + "\" - $filepath = $basedir + $filename - if (Test-Path $filename) { - Write-Host "Reusing" $filepath - return $filepath - } - - # Download and retry up to 3 times in case of network transient errors. - Write-Host "Downloading" $filename "from" $url - $retry_attempts = 2 - for($i=0; $i -lt $retry_attempts; $i++){ - try { - $webclient.DownloadFile($url, $filepath) - break - } - Catch [Exception]{ - Start-Sleep 1 - } - } - if (Test-Path $filepath) { - Write-Host "File saved at" $filepath - } else { - # Retry once to get the error message if any at the last try - $webclient.DownloadFile($url, $filepath) - } - return $filepath -} - -function Start-Executable { - param( - [String] $FilePath, - [String[]] $ArgumentList - ) - $OFS = " " - $process = New-Object System.Diagnostics.Process - $process.StartInfo.FileName = $FilePath - $process.StartInfo.Arguments = $ArgumentList - $process.StartInfo.UseShellExecute = $false - $process.StartInfo.RedirectStandardOutput = $true - if ( $process.Start() ) { - $output = $process.StandardOutput.ReadToEnd() ` - -replace "\r\n$","" - if ( $output ) { - if ( $output.Contains("`r`n") ) { - $output -split "`r`n" - } - elseif ( $output.Contains("`n") ) { - $output -split "`n" - } - else { - $output - } - } - $process.WaitForExit() - & "$Env:SystemRoot\system32\cmd.exe" ` - /c exit $process.ExitCode - } - } - -function InstallMiniconda ($python_version, $architecture, $python_home) { - Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home - if (Test-Path $python_home) { - Write-Host $python_home "already exists, skipping." 
- return $false - } - if ($architecture -match "32") { - $platform_suffix = "x86" - } else { - $platform_suffix = "x86_64" - } - - $filepath = DownloadMiniconda $python_version $platform_suffix - Write-Host "Installing" $filepath "to" $python_home - $install_log = $python_home + ".log" - $args = "/S /D=$python_home" - Write-Host $filepath $args - Start-Process -FilePath $filepath -ArgumentList $args -Wait - if (Test-Path $python_home) { - Write-Host "Python $python_version ($architecture) installation complete" - } else { - Write-Host "Failed to install Python in $python_home" - Get-Content -Path $install_log - Exit 1 - } -} - - -function InstallCondaPackages ($python_home, $spec) { - $conda_path = $python_home + "\Scripts\conda.exe" - $args = "install --yes --quiet " + $spec - Write-Host ("conda " + $args) - Start-Executable -FilePath "$conda_path" -ArgumentList $args -} -function InstallCondaPackagesFromFile ($python_home, $ver, $arch) { - $conda_path = $python_home + "\Scripts\conda.exe" - $args = "install --yes --quiet --file " + $env:APPVEYOR_BUILD_FOLDER + "\ci\requirements-" + $ver + "_" + $arch + ".txt" - Write-Host ("conda " + $args) - Start-Executable -FilePath "$conda_path" -ArgumentList $args -} - -function UpdateConda ($python_home) { - $conda_path = $python_home + "\Scripts\conda.exe" - Write-Host "Updating conda..." 
- $args = "update --yes conda" - Write-Host $conda_path $args - Start-Process -FilePath "$conda_path" -ArgumentList $args -Wait -} - - -function main () { - InstallMiniconda $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON - UpdateConda $env:PYTHON - InstallCondaPackages $env:PYTHON "pip setuptools nose" - InstallCondaPackagesFromFile $env:PYTHON $env:PYTHON_VERSION $env:PYTHON_ARCH -} - -main \ No newline at end of file diff --git a/ci/requirements-3.6-64.run b/ci/requirements-3.6-64.run new file mode 100644 index 0000000000000..58ba103504b2c --- /dev/null +++ b/ci/requirements-3.6-64.run @@ -0,0 +1,13 @@ +python-dateutil +pytz +numpy +openpyxl +xlsxwriter +xlrd +#xlwt +scipy +feather-format +numexpr +pytables +matplotlib +blosc diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 8db0e6202f7fc..4b8e7714a911e 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -448,6 +448,7 @@ def test_read_excel_blank_with_header(self): # GH 12292 : error when read one empty column from excel file def test_read_one_empty_col_no_header(self): + _skip_if_no_xlwt() df = pd.DataFrame( [["", 1, 100], ["", 2, 200], @@ -504,6 +505,7 @@ def test_read_one_empty_col_with_header(self): tm.assert_frame_equal(actual_header_zero, expected_header_zero) def test_set_column_names_in_parameter(self): + _skip_if_no_xlwt() # GH 12870 : pass down column names associated with # keyword argument names refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
CLN: clean up appveyor.yml a bit
https://api.github.com/repos/pandas-dev/pandas/pulls/15088
2017-01-09T17:52:17Z
2017-01-09T17:52:23Z
2017-01-09T17:52:23Z
2017-01-09T19:52:50Z
BUG: Value error aggregate item by item
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 0148a47068beb..fd522f2bcf91e 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -318,7 +318,7 @@ Bug Fixes - Bug in ``Series.unique()`` in which unsigned 64-bit integers were causing overflow (:issue:`14721`) - Bug in ``pd.unique()`` in which unsigned 64-bit integers were causing overflow (:issue:`14915`) - +- Bug in ``_GroupBy`` where a ``ValueError`` is raised without a message. (:issue:`15082`) - Bug in ``Series.iloc`` where a ``Categorical`` object for list-like indexes input was returned, where a ``Series`` was expected. (:issue:`14580`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 7eba32b4932d0..322fa70c4acb0 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -588,8 +588,8 @@ def curried(x): try: return self._aggregate_item_by_item(name, *args, **kwargs) - except (AttributeError): - raise ValueError + except (AttributeError) as e: + raise ValueError(e) return wrapper diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index e87b5d04271e8..d147993fe2f54 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -772,6 +772,31 @@ def max_value(group): 'int64': 1}).sort_values() assert_series_equal(result, expected) + def test_groupby_aggregate_item_by_item(self): + def test_df(): + s = pd.DataFrame(np.array([[13, 14, 15, 16]]), + index=[0], + columns=['b', 'c', 'd', 'e']) + a1 = [s, s, datetime.strptime('2016-12-28', "%Y-%m-%d"), 'asdf', 2] + a2 = [s, s, datetime.strptime('2016-12-28', "%Y-%m-%d"), 'asdf', 6] + num = np.array([a1, a2]) + columns = ['b', 'c', 'd', 'e', 'f'] + idx = [x for x in xrange(0, len(num))] + return pd.DataFrame(num, index=idx, columns=columns) + c = [test_df().sort_values(['d', 'e', 'f']), + test_df().sort_values(['d', 'e', 'f'])] + df = pd.concat(c) + df = df[["e", "a"]].copy().reset_index(drop=True) + 
df["e_idx"] = df["e"] + what = [0, 0.5, 0.5, 1] + + def x(): + df.groupby(["e_idx", "e"])["a"].quantile(what) + self.assertRaisesRegexp(ValueError, + "'SeriesGroupBy' object " + "has no attribute '_aggregate_item_by_item'", + x) + def test_groupby_return_type(self): # GH2893, return a reduced type
- [x] closes #15082 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/15083
2017-01-08T08:51:44Z
2017-01-09T18:22:22Z
null
2017-01-09T18:22:35Z
Allow indices to be mapped through through dictionaries or series
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index 3c0e2869357ae..5e8cf3a0350bb 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -123,6 +123,30 @@ def time_series_dropna_datetime(self): self.s.dropna() +class series_map_dict(object): + goal_time = 0.2 + + def setup(self): + map_size = 1000 + self.s = Series(np.random.randint(0, map_size, 10000)) + self.map_dict = {i: map_size - i for i in range(map_size)} + + def time_series_map_dict(self): + self.s.map(self.map_dict) + + +class series_map_series(object): + goal_time = 0.2 + + def setup(self): + map_size = 1000 + self.s = Series(np.random.randint(0, map_size, 10000)) + self.map_series = Series(map_size - np.arange(map_size)) + + def time_series_map_series(self): + self.s.map(self.map_series) + + class series_clip(object): goal_time = 0.2 diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 5549ba4e8f735..f97b958d553e0 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -75,6 +75,7 @@ Other API Changes - :class:`CacheableOffset` and :class:`WeekDay` are no longer available in the ``pandas.tseries.offsets`` module (:issue:`17830`) - `tseries.frequencies.get_freq_group()` and `tseries.frequencies.DAYS` are removed from the public API (:issue:`18034`) - :func:`Series.truncate` and :func:`DataFrame.truncate` will raise a ``ValueError`` if the index is not sorted instead of an unhelpful ``KeyError`` (:issue:`17935`) +- :func:`Index.map` can now accept ``Series`` and dictionary input objects (:issue:`12756`). - :func:`Dataframe.unstack` will now default to filling with ``np.nan`` for ``object`` columns. 
(:issue:`12815`) - :class:`IntervalIndex` constructor will raise if the ``closed`` parameter conflicts with how the input data is inferred to be closed (:issue:`18421`) @@ -108,6 +109,7 @@ Performance Improvements - Added a keyword argument, ``cache``, to :func:`to_datetime` that improved the performance of converting duplicate datetime arguments (:issue:`11665`) - :class`DateOffset` arithmetic performance is improved (:issue:`18218`) - Converting a ``Series`` of ``Timedelta`` objects to days, seconds, etc... sped up through vectorization of underlying methods (:issue:`18092`) +- Improved performance of ``.map()`` with a ``Series/dict`` input (:issue:`15081`) - The overriden ``Timedelta`` properties of days, seconds and microseconds have been removed, leveraging their built-in Python versions instead (:issue:`18242`) - ``Series`` construction will reduce the number of copies made of the input data in certain cases (:issue:`17449`) - Improved performance of :func:`Series.dt.date` and :func:`DatetimeIndex.date` (:issue:`18058`) diff --git a/pandas/core/base.py b/pandas/core/base.py index 90fe350848bf7..cce0f384cb983 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -12,11 +12,12 @@ is_object_dtype, is_list_like, is_scalar, - is_datetimelike) + is_datetimelike, + is_extension_type) from pandas.util._validators import validate_bool_kwarg -from pandas.core import common as com +from pandas.core import common as com, algorithms import pandas.core.nanops as nanops import pandas._libs.lib as lib from pandas.compat.numpy import function as nv @@ -838,6 +839,78 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, klass=self.__class__.__name__, op=name)) return func(**kwds) + def _map_values(self, mapper, na_action=None): + """An internal function that maps values using the input + correspondence (which can be a dict, Series, or function). 
+ + Parameters + ---------- + mapper : function, dict, or Series + The input correspondence object + na_action : {None, 'ignore'} + If 'ignore', propagate NA values, without passing them to the + mapping function + + Returns + ------- + applied : Union[Index, MultiIndex], inferred + The output of the mapping function applied to the index. + If the function returns a tuple with more than one element + a MultiIndex will be returned. + + """ + + # we can fastpath dict/Series to an efficient map + # as we know that we are not going to have to yield + # python types + if isinstance(mapper, dict): + if hasattr(mapper, '__missing__'): + # If a dictionary subclass defines a default value method, + # convert mapper to a lookup function (GH #15999). + dict_with_default = mapper + mapper = lambda x: dict_with_default[x] + else: + # Dictionary does not have a default. Thus it's safe to + # convert to an Series for efficiency. + # we specify the keys here to handle the + # possibility that they are tuples + from pandas import Series + mapper = Series(mapper, index=mapper.keys()) + + if isinstance(mapper, ABCSeries): + # Since values were input this means we came from either + # a dict or a series and mapper should be an index + if is_extension_type(self.dtype): + values = self._values + else: + values = self.values + + indexer = mapper.index.get_indexer(values) + new_values = algorithms.take_1d(mapper._values, indexer) + + return new_values + + # we must convert to python types + if is_extension_type(self.dtype): + values = self._values + if na_action is not None: + raise NotImplementedError + map_f = lambda values, f: values.map(f) + else: + values = self.astype(object) + values = getattr(values, 'values', values) + if na_action == 'ignore': + def map_f(values, f): + return lib.map_infer_mask(values, f, + isna(values).view(np.uint8)) + else: + map_f = lib.map_infer + + # mapper is a function + new_values = map_f(values, mapper) + + return new_values + def value_counts(self, 
normalize=False, sort=True, ascending=False, bins=None, dropna=True): """ diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index bc8aacfe90170..a97b84ab9cc5b 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1127,3 +1127,38 @@ def cast_scalar_to_array(shape, value, dtype=None): values.fill(fill_value) return values + + +def construct_1d_arraylike_from_scalar(value, length, dtype): + """ + create a np.ndarray / pandas type of specified shape and dtype + filled with values + + Parameters + ---------- + value : scalar value + length : int + dtype : pandas_dtype / np.dtype + + Returns + ------- + np.ndarray / pandas type of length, filled with value + + """ + if is_datetimetz(dtype): + from pandas import DatetimeIndex + subarr = DatetimeIndex([value] * length, dtype=dtype) + elif is_categorical_dtype(dtype): + from pandas import Categorical + subarr = Categorical([value] * length) + else: + if not isinstance(dtype, (np.dtype, type(np.dtype))): + dtype = dtype.dtype + + # coerce if we have nan for an integer dtype + if is_integer_dtype(dtype) and isna(value): + dtype = np.float64 + subarr = np.empty(length, dtype=dtype) + subarr.fill(value) + + return subarr diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 7cae536c5edd9..ce57b544d9d66 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -369,13 +369,14 @@ def _maybe_fill(arr, fill_value=np.nan): return arr -def na_value_for_dtype(dtype): +def na_value_for_dtype(dtype, compat=True): """ Return a dtype compat na value Parameters ---------- dtype : string / dtype + compat : boolean, default True Returns ------- @@ -389,7 +390,9 @@ def na_value_for_dtype(dtype): elif is_float_dtype(dtype): return np.nan elif is_integer_dtype(dtype): - return 0 + if compat: + return 0 + return np.nan elif is_bool_dtype(dtype): return False return np.nan diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 
af9e29a84b472..8a751f0204b60 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -13,7 +13,6 @@ from pandas.compat.numpy import function as nv from pandas import compat - from pandas.core.dtypes.generic import ( ABCSeries, ABCMultiIndex, @@ -2827,6 +2826,27 @@ def get_indexer_for(self, target, **kwargs): indexer, _ = self.get_indexer_non_unique(target, **kwargs) return indexer + _index_shared_docs['_get_values_from_dict'] = """ + Return the values of the input dictionary in the order the keys are + in the index. np.nan is returned for index values not in the + dictionary. + + Parameters + ---------- + data : dict + The dictionary from which to extract the values + + Returns + ------- + np.array + + """ + + @Appender(_index_shared_docs['_get_values_from_dict']) + def _get_values_from_dict(self, data): + return lib.fast_multiget(data, self.values, + default=np.nan) + def _maybe_promote(self, other): # A hack, but it works from pandas.core.indexes.datetimes import DatetimeIndex @@ -2865,13 +2885,15 @@ def groupby(self, values): return result - def map(self, mapper): - """Apply mapper function to an index. + def map(self, mapper, na_action=None): + """Map values of Series using input correspondence Parameters ---------- - mapper : callable - Function to be applied. + mapper : function, dict, or Series + na_action : {None, 'ignore'} + If 'ignore', propagate NA values, without passing them to the + mapping function Returns ------- @@ -2881,15 +2903,26 @@ def map(self, mapper): a MultiIndex will be returned. 
""" + from .multi import MultiIndex - mapped_values = self._arrmap(self.values, mapper) + new_values = super(Index, self)._map_values( + mapper, na_action=na_action) attributes = self._get_attributes_dict() - if mapped_values.size and isinstance(mapped_values[0], tuple): - return MultiIndex.from_tuples(mapped_values, - names=attributes.get('name')) + if new_values.size and isinstance(new_values[0], tuple): + if isinstance(self, MultiIndex): + names = self.names + elif attributes.get('name'): + names = [attributes.get('name')] * len(new_values[0]) + else: + names = None + return MultiIndex.from_tuples(new_values, + names=names) attributes['copy'] = False - return Index(mapped_values, **attributes) + + # we infer the result types based on the + # returned values + return Index(new_values, **attributes) def isin(self, values, level=None): """ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 4934ccb49b844..5643d886a4fec 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -136,7 +136,7 @@ def equals(self, other): elif not isinstance(other, type(self)): try: other = type(self)(other) - except: + except Exception: return False if not is_dtype_equal(self.dtype, other.dtype): @@ -352,7 +352,7 @@ def map(self, f): # Try to use this result if we can if isinstance(result, np.ndarray): - self._shallow_copy(result) + result = Index(result) if not isinstance(result, Index): raise TypeError('The map function must return an Index object') @@ -698,6 +698,14 @@ def __rsub__(self, other): def _add_delta(self, other): return NotImplemented + @Appender(_index_shared_docs['_get_values_from_dict']) + def _get_values_from_dict(self, data): + if len(data): + return np.array([data.get(i, np.nan) + for i in self.asobject.values]) + + return np.array([np.nan]) + def _add_delta_td(self, other): # add a delta of a timedeltalike # return the i8 result view diff --git a/pandas/core/indexes/datetimes.py 
b/pandas/core/indexes/datetimes.py index 111ba0c92aa9b..e1def38289243 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1457,6 +1457,17 @@ def get_value_maybe_box(self, series, key): key, tz=self.tz) return _maybe_box(self, values, series, key) + @Appender(_index_shared_docs['_get_values_from_dict']) + def _get_values_from_dict(self, data): + if len(data): + # coerce back to datetime objects for lookup + data = com._dict_compat(data) + return lib.fast_multiget(data, + self.asobject.values, + default=np.nan) + + return np.array([np.nan]) + def get_loc(self, key, method=None, tolerance=None): """ Get integer location for requested label diff --git a/pandas/core/series.py b/pandas/core/series.py index d7833526c0408..bff7c21ad69b1 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -18,7 +18,7 @@ is_bool, is_integer, is_integer_dtype, is_float_dtype, - is_extension_type, is_datetimetz, + is_extension_type, is_datetime64tz_dtype, is_timedelta64_dtype, is_list_like, @@ -34,7 +34,8 @@ from pandas.core.dtypes.cast import ( maybe_upcast, infer_dtype_from_scalar, maybe_convert_platform, - maybe_cast_to_datetime, maybe_castable) + maybe_cast_to_datetime, maybe_castable, + construct_1d_arraylike_from_scalar) from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike from pandas.core.common import (is_bool_indexer, @@ -45,7 +46,6 @@ _maybe_match_name, SettingWithCopyError, _maybe_box_datetimelike, - _dict_compat, standardize_mapping, _any_none) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, @@ -203,23 +203,9 @@ def __init__(self, data=None, index=None, dtype=None, name=None, index = Index(data) else: index = Index(_try_sort(data)) + try: - if isinstance(index, DatetimeIndex): - if len(data): - # coerce back to datetime objects for lookup - data = _dict_compat(data) - data = lib.fast_multiget(data, - index.asobject.values, - default=np.nan) - else: - data = np.nan - # GH #12169 - elif 
isinstance(index, (PeriodIndex, TimedeltaIndex)): - data = ([data.get(i, np.nan) for i in index] - if data else np.nan) - else: - data = lib.fast_multiget(data, index.values, - default=np.nan) + data = index._get_values_from_dict(data) except TypeError: data = ([data.get(i, np.nan) for i in index] if data else np.nan) @@ -2338,41 +2324,8 @@ def map(self, arg, na_action=None): 3 0 dtype: int64 """ - - if is_extension_type(self.dtype): - values = self._values - if na_action is not None: - raise NotImplementedError - map_f = lambda values, f: values.map(f) - else: - values = self.asobject - - if na_action == 'ignore': - def map_f(values, f): - return lib.map_infer_mask(values, f, - isna(values).view(np.uint8)) - else: - map_f = lib.map_infer - - if isinstance(arg, dict): - if hasattr(arg, '__missing__'): - # If a dictionary subclass defines a default value method, - # convert arg to a lookup function (GH #15999). - dict_with_default = arg - arg = lambda x: dict_with_default[x] - else: - # Dictionary does not have a default. Thus it's safe to - # convert to an indexed series for efficiency. 
- arg = self._constructor(arg, index=arg.keys()) - - if isinstance(arg, Series): - # arg is a Series - indexer = arg.index.get_indexer(values) - new_values = algorithms.take_1d(arg._values, indexer) - else: - # arg is a function - new_values = map_f(values, arg) - + new_values = super(Series, self)._map_values( + arg, na_action=na_action) return self._constructor(new_values, index=self.index).__finalize__(self) @@ -3248,21 +3201,6 @@ def _try_cast(arr, take_fast_path): else: subarr = _try_cast(data, False) - def create_from_value(value, index, dtype): - # return a new empty value suitable for the dtype - - if is_datetimetz(dtype): - subarr = DatetimeIndex([value] * len(index), dtype=dtype) - elif is_categorical_dtype(dtype): - subarr = Categorical([value] * len(index)) - else: - if not isinstance(dtype, (np.dtype, type(np.dtype))): - dtype = dtype.dtype - subarr = np.empty(len(index), dtype=dtype) - subarr.fill(value) - - return subarr - # scalar like, GH if getattr(subarr, 'ndim', 0) == 0: if isinstance(data, list): # pragma: no cover @@ -3277,7 +3215,8 @@ def create_from_value(value, index, dtype): # need to possibly convert the value here value = maybe_cast_to_datetime(value, dtype) - subarr = create_from_value(value, index, dtype) + subarr = construct_1d_arraylike_from_scalar( + value, len(index), dtype) else: return subarr.item() @@ -3288,8 +3227,8 @@ def create_from_value(value, index, dtype): # a 1-element ndarray if len(subarr) != len(index) and len(subarr) == 1: - subarr = create_from_value(subarr[0], index, - subarr.dtype) + subarr = construct_1d_arraylike_from_scalar( + subarr[0], len(index), subarr.dtype) elif subarr.ndim > 1: if isinstance(data, np.ndarray): diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 43b20f420eb48..ee6434431bcfc 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -1005,3 +1005,30 @@ def test_searchsorted_monotonic(self, indices): # non-monotonic should raise. 
with pytest.raises(ValueError): indices._searchsorted_monotonic(value, side='left') + + def test_map(self): + index = self.create_index() + + # From output of UInt64Index mapping can't infer that we + # shouldn't default to Int64 + if isinstance(index, UInt64Index): + expected = Index(index.values.tolist()) + else: + expected = index + + tm.assert_index_equal(index.map(lambda x: x), expected) + + identity_dict = {x: x for x in index} + tm.assert_index_equal(index.map(identity_dict), expected) + + # Use values to work around MultiIndex instantiation of series + identity_series = Series(expected.values, index=index) + tm.assert_index_equal(index.map(identity_series), expected) + + # empty mappable + nan_index = pd.Index([np.nan] * len(index)) + series_map = pd.Series() + tm.assert_index_equal(index.map(series_map), nan_index) + + dict_map = {} + tm.assert_index_equal(index.map(dict_map), nan_index) diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py index 12b509d4aef3f..839fccc1441e5 100644 --- a/pandas/tests/indexes/datetimelike.py +++ b/pandas/tests/indexes/datetimelike.py @@ -1,5 +1,7 @@ """ generic datetimelike tests """ - +import pytest +import pandas as pd +import numpy as np from .common import Base import pandas.util.testing as tm @@ -38,3 +40,39 @@ def test_view(self, indices): i_view = i.view(self._holder) result = self._holder(i) tm.assert_index_equal(result, i_view) + + def test_map_callable(self): + + expected = self.index + 1 + result = self.index.map(lambda x: x + 1) + tm.assert_index_equal(result, expected) + + # map to NaT + result = self.index.map(lambda x: pd.NaT if x == self.index[0] else x) + expected = pd.Index([pd.NaT] + self.index[1:].tolist()) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "mapper", + [ + lambda values, index: {i: e for e, i in zip(values, index)}, + lambda values, index: pd.Series(values, index)]) + def test_map_dictlike(self, mapper): + expected = self.index + 
1 + + # don't compare the freqs + if isinstance(expected, pd.DatetimeIndex): + expected.freq = None + + result = self.index.map(mapper(expected, self.index)) + tm.assert_index_equal(result, expected) + + expected = pd.Index([pd.NaT] + self.index[1:].tolist()) + result = self.index.map(mapper(expected, self.index)) + tm.assert_index_equal(result, expected) + + # empty map; these map to np.nan because we cannot know + # to re-infer things + expected = pd.Index([np.nan] * len(self.index)) + result = self.index.map(mapper([], [])) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 52558c27ce707..9d5746e07814e 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -683,11 +683,9 @@ def test_pickle_freq(self): assert new_prng.freqstr == 'M' def test_map(self): - index = PeriodIndex([2005, 2007, 2009], freq='A') - result = index.map(lambda x: x + 1) - expected = index + 1 - tm.assert_index_equal(result, expected) + # test_map_dictlike generally tests + index = PeriodIndex([2005, 2007, 2009], freq='A') result = index.map(lambda x: x.ordinal) exp = Index([x.ordinal for x in index]) tm.assert_index_equal(result, exp) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 99a99cc5cc3eb..f5016e6d19a57 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -4,6 +4,8 @@ from datetime import datetime, timedelta +from collections import defaultdict + import pandas.util.testing as tm from pandas.core.dtypes.common import is_unsigned_integer_dtype from pandas.core.indexes.api import Index, MultiIndex @@ -844,6 +846,64 @@ def test_map_tseries_indices_return_index(self): exp = Index(range(24), name='hourly') tm.assert_index_equal(exp, date_index.map(lambda x: x.hour)) + def test_map_with_dict_and_series(self): + # GH 12756 + expected = Index(['foo', 'bar', 'baz']) + 
mapper = Series(expected.values, index=[0, 1, 2]) + result = tm.makeIntIndex(3).map(mapper) + tm.assert_index_equal(result, expected) + + for name in self.indices.keys(): + if name == 'catIndex': + # Tested in test_categorical + continue + elif name == 'repeats': + # Cannot map duplicated index + continue + + cur_index = self.indices[name] + expected = Index(np.arange(len(cur_index), 0, -1)) + mapper = pd.Series(expected, index=cur_index) + result = cur_index.map(mapper) + + tm.assert_index_equal(result, expected) + + # If the mapper is empty the expected index type is Int64Index + # but the output defaults to Float64 so I treat it independently + mapper = {o: n for o, n in + zip(cur_index, expected)} + + result = cur_index.map(mapper) + if not mapper: + expected = Float64Index([]) + tm.assert_index_equal(result, expected) + + def test_map_with_non_function_missing_values(self): + # GH 12756 + expected = Index([2., np.nan, 'foo']) + input = Index([2, 1, 0]) + + mapper = Series(['foo', 2., 'baz'], index=[0, 2, -1]) + tm.assert_index_equal(expected, input.map(mapper)) + + mapper = {0: 'foo', 2: 2.0, -1: 'baz'} + tm.assert_index_equal(expected, input.map(mapper)) + + def test_map_na_exclusion(self): + idx = Index([1.5, np.nan, 3, np.nan, 5]) + + result = idx.map(lambda x: x * 2, na_action='ignore') + exp = idx * 2 + tm.assert_index_equal(result, exp) + + def test_map_defaultdict(self): + idx = Index([1, 2, 3]) + default_dict = defaultdict(lambda: 'blank') + default_dict[1] = 'stuff' + result = idx.map(default_dict) + expected = Index(['stuff', 'blank', 'blank']) + tm.assert_index_equal(result, expected) + def test_append_multiple(self): index = Index(['a', 'b', 'c', 'd', 'e', 'f']) diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 5e6898f9c8711..92d5a53f6570b 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -269,6 +269,24 @@ def f(x): ordered=False) 
tm.assert_index_equal(result, exp) + result = ci.map(pd.Series([10, 20, 30], index=['A', 'B', 'C'])) + tm.assert_index_equal(result, exp) + + result = ci.map({'A': 10, 'B': 20, 'C': 30}) + tm.assert_index_equal(result, exp) + + def test_map_with_categorical_series(self): + # GH 12756 + a = pd.Index([1, 2, 3, 4]) + b = pd.Series(["even", "odd", "even", "odd"], + dtype="category") + c = pd.Series(["even", "odd", "even", "odd"]) + + exp = CategoricalIndex(["odd", "even", "odd", np.nan]) + tm.assert_index_equal(a.map(b), exp) + exp = pd.Index(["odd", "even", "odd", np.nan]) + tm.assert_index_equal(a.map(c), exp) + @pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series]) def test_where(self, klass): i = self.create_index() diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index 7d6f544f6d533..b17d241ff50e0 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -530,6 +530,10 @@ def test_repr_max_seq_item_setting(self): def test_repr_roundtrip(self): super(TestIntervalIndex, self).test_repr_roundtrip() + @pytest.mark.xfail(reason='get_indexer behavior does not currently work') + def test_map(self): + super(TestIntervalIndex, self).test_map() + def test_get_item(self, closed): i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), closed=closed) diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 533b06088f1bf..e25384ebf7d62 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -187,6 +187,7 @@ def test_misc_coverage(self): assert not idx.equals(list(non_td)) def test_map(self): + # test_map_dictlike generally tests rng = timedelta_range('1 day', periods=10) diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 2c93d2afd1760..22b3fd9073bab 100644 --- 
a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -439,3 +439,21 @@ def test_indexing_with_category(self): res = (cat[['A']] == 'foo') tm.assert_frame_equal(res, exp) + + def test_map_with_dict_or_series(self): + orig_values = ['a', 'B', 1, 'a'] + new_values = ['one', 2, 3.0, 'one'] + cur_index = pd.CategoricalIndex(orig_values, name='XXX') + expected = pd.CategoricalIndex(new_values, + name='XXX', categories=[3.0, 2, 'one']) + + mapper = pd.Series(new_values[:-1], index=orig_values[:-1]) + output = cur_index.map(mapper) + # Order of categories in output can be different + tm.assert_index_equal(expected, output) + + mapper = {o: n for o, n in + zip(orig_values[:-1], new_values[:-1])} + output = cur_index.map(mapper) + # Order of categories in output can be different + tm.assert_index_equal(expected, output) diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index d0693984689a6..fe21ba569ae99 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -424,6 +424,7 @@ def test_map_dict_with_tuple_keys(self): """ df = pd.DataFrame({'a': [(1, ), (2, ), (3, 4), (5, 6)]}) label_mappings = {(1, ): 'A', (2, ): 'B', (3, 4): 'A', (5, 6): 'B'} + df['labels'] = df['a'].map(label_mappings) df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index) # All labels should be filled now
- [X] closes #12756 - [X] tests added / passed - [X] passes ``git diff upstream/master | flake8 --diff`` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/15081
2017-01-08T00:17:29Z
2017-11-25T14:24:16Z
2017-11-25T14:24:15Z
2017-11-25T18:22:47Z
Pypy fixes
diff --git a/pandas/lib.pyx b/pandas/lib.pyx index 761969491cfc7..e3ee468e1a23b 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -26,9 +26,9 @@ from cpython cimport (PyDict_New, PyDict_GetItem, PyDict_SetItem, PyBytes_GET_SIZE, PyUnicode_GET_SIZE) -try: +IF PY_MAJOR_VERSION == 2: from cpython cimport PyString_GET_SIZE -except ImportError: +ELSE: from cpython cimport PyUnicode_GET_SIZE as PyString_GET_SIZE cdef extern from "Python.h": diff --git a/pandas/src/hashtable_class_helper.pxi.in b/pandas/src/hashtable_class_helper.pxi.in index b26839599ef38..6bf94c9366c8a 100644 --- a/pandas/src/hashtable_class_helper.pxi.in +++ b/pandas/src/hashtable_class_helper.pxi.in @@ -691,7 +691,11 @@ cdef class PyObjectHashTable(HashTable): def __dealloc__(self): if self.table is not NULL: - self.destroy() + # cython documentation - + # "don’t call any other methods of the object" + # self.destroy() + kh_destroy_pymap(self.table) + self.table = NULL def __len__(self): return self.table.size diff --git a/pandas/src/ujson/python/JSONtoObj.c b/pandas/src/ujson/python/JSONtoObj.c index b0132532c16af..6be4584b854f5 100644 --- a/pandas/src/ujson/python/JSONtoObj.c +++ b/pandas/src/ujson/python/JSONtoObj.c @@ -427,7 +427,11 @@ int Object_npyObjectAddKey(void *prv, JSOBJ obj, JSOBJ name, JSOBJ value) { // only fill label array once, assumes all column labels are the same // for 2-dimensional arrays. 
+#ifdef PYPY_VERSION + if (PyObject_GET_SIZE(npyarr->labels[labelidx]) <= npyarr->elcount) { +#else if (PyList_GET_SIZE(npyarr->labels[labelidx]) <= npyarr->elcount) { +#endif PyList_Append(npyarr->labels[labelidx], label); } diff --git a/setup.py b/setup.py index 0a84cf527bfb1..015db43afada0 100755 --- a/setup.py +++ b/setup.py @@ -150,6 +150,12 @@ def build_extensions(self): with open(outfile, "w") as f: f.write(pyxcontent) + # used in lib.pyx, assumes old_build_ext + if not self.cython_compile_time_env: + self.cython_compile_time_env = {} + major = sys.version_info.major + self.cython_compile_time_env['PY_MAJOR_VERSION'] = major + numpy_incl = pkg_resources.resource_filename('numpy', 'core/include') for ext in self.extensions:
benign changes visa-vis CPython needed to build pandas for PyPy 2.7
https://api.github.com/repos/pandas-dev/pandas/pulls/15080
2017-01-07T19:25:36Z
2017-01-10T20:32:31Z
null
2017-01-10T20:46:50Z
PERF: improve perf of float-based timeseries plotting
diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py index 3350ddaccc496..757c3e27dd333 100644 --- a/asv_bench/benchmarks/plotting.py +++ b/asv_bench/benchmarks/plotting.py @@ -20,6 +20,9 @@ def setup(self): def time_plot_regular(self): self.df.plot() + def time_plot_regular_compat(self): + self.df.plot(x_compat=True) + class Misc(object): goal_time = 0.6 diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 8b6b765a81dba..da13f724eb663 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -281,10 +281,14 @@ Performance Improvements - Improved performance of ``pd.wide_to_long()`` (:issue:`14779`) - Increased performance of ``pd.factorize()`` by releasing the GIL with ``object`` dtype when inferred as strings (:issue:`14859`) +- Improved performance of timeseries plotting with an irregular DatetimeIndex + (or with ``compat_x=True``) (:issue:`15073`). + - When reading buffer object in ``read_sas()`` method without specified format, filepath string is inferred rather than buffer object. + .. 
_whatsnew_0200.bug_fixes: Bug Fixes diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py index 8f8519a498a31..95ff9578fa3ee 100644 --- a/pandas/tseries/converter.py +++ b/pandas/tseries/converter.py @@ -212,7 +212,7 @@ def try_parse(values): try: values = tools.to_datetime(values) if isinstance(values, Index): - values = values.map(_dt_to_float_ordinal) + values = _dt_to_float_ordinal(values) else: values = [_dt_to_float_ordinal(x) for x in values] except Exception: diff --git a/pandas/tseries/tests/test_converter.py b/pandas/tseries/tests/test_converter.py index 7e4ed288e31c1..37d9c35639c32 100644 --- a/pandas/tseries/tests/test_converter.py +++ b/pandas/tseries/tests/test_converter.py @@ -3,7 +3,7 @@ import nose import numpy as np -from pandas import Timestamp, Period, Index +from pandas import Timestamp, Period from pandas.compat import u import pandas.util.testing as tm from pandas.tseries.offsets import Second, Milli, Micro @@ -104,8 +104,8 @@ def test_dateindex_conversion(self): for freq in ('B', 'L', 'S'): dateindex = tm.makeDateIndex(k=10, freq=freq) rs = self.dtc.convert(dateindex, None, None) - xp = Index(converter.dates.date2num(dateindex._mpl_repr())) - tm.assert_index_equal(rs, xp, decimals) + xp = converter.dates.date2num(dateindex._mpl_repr()) + tm.assert_almost_equal(rs, xp, decimals) def test_resolution(self): def _assert_less(ts1, ts2):
xref #15071 `_dt_to_float_ordinal` was already vectorized, so no need to map it on the index. So now is 'irregular' plotting even faster as period-based plotting: ``` $ asv continuous upstream/master HEAD -b TimeseriesPlotting ... · Running 4 total benchmarks (2 commits * 1 environments * 2 benchmarks) [ 0.00%] · For pandas commit hash 4fc0dca6: [ 0.00%] ·· Benchmarking conda-py2.7-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt [ 25.00%] ··· Running plotting.TimeseriesPlotting.time_plot_regular 126.57ms [ 50.00%] ··· Running plotting.TimeseriesPlotting.time_plot_regular_compat 87.57ms [ 50.00%] · For pandas commit hash de09c988: [ 50.00%] ·· Benchmarking conda-py2.7-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt [ 75.00%] ··· Running plotting.TimeseriesPlotting.time_plot_regular 126.01ms [100.00%] ··· Running plotting.TimeseriesPlotting.time_plot_regular_compat 305.58ms before after ratio [de09c988] [4fc0dca6] - 305.58ms 87.57ms 0.29 plotting.TimeseriesPlotting.time_plot_regular_compat ```
https://api.github.com/repos/pandas-dev/pandas/pulls/15073
2017-01-06T11:45:05Z
2017-01-09T19:03:48Z
2017-01-09T19:03:48Z
2017-01-10T00:35:45Z
Adds custom plot formatting for TimedeltaIndex.
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 2b2012dbf0b8a..d864eec0db7a3 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -1245,6 +1245,16 @@ in ``pandas.plot_params`` can be used in a `with statement`: plt.close('all') +Automatic Date Tick Adjustment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Especially for the ``TimedeltaIndex`` that uses native matplotlib +tick locator methods, it is useful to call the automatic +date tick adjustment from matplotlib for figures whose ticklabels overlap. + +See the :meth:`autofmt_xdate <matplotlib.figure.autofmt_xdate>` method and the +`matplotlib documentation <http://matplotlib.org/users/recipes.html#fixing-common-date-annoyances>`__ for more. + Subplots ~~~~~~~~ diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 86f916bc0acfb..9124929ee5665 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -155,7 +155,7 @@ Other enhancements - ``Series/DataFrame.squeeze()`` have gained the ``axis`` parameter. (:issue:`15339`) - ``DataFrame.to_excel()`` has a new ``freeze_panes`` parameter to turn on Freeze Panes when exporting to Excel (:issue:`15160`) - HTML table output skips ``colspan`` or ``rowspan`` attribute if equal to 1. (:issue:`15403`) - +- ``pd.TimedeltaIndex`` now has a custom datetick formatter specifically designed for nanosecond level precision (:issue:`8711`) .. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations .. 
_whatsnew_0200.api_breaking: diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 25568f7eb61dc..cdacded4d7f35 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -9,6 +9,7 @@ from pandas import Index, Series, DataFrame from pandas.tseries.index import date_range, bdate_range +from pandas.tseries.tdi import timedelta_range from pandas.tseries.offsets import DateOffset from pandas.tseries.period import period_range, Period, PeriodIndex from pandas.tseries.resample import DatetimeIndex @@ -1270,6 +1271,63 @@ def test_plot_outofbounds_datetime(self): values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)] self.plt.plot(values) + def test_format_timedelta_ticks_narrow(self): + + expected_labels = [ + '00:00:00.00000000{:d}'.format(i) + for i in range(10)] + + rng = timedelta_range('0', periods=10, freq='ns') + df = DataFrame(np.random.randn(len(rng), 3), rng) + ax = df.plot(fontsize=2) + fig = ax.get_figure() + fig.canvas.draw() + labels = ax.get_xticklabels() + self.assertEqual(len(labels), len(expected_labels)) + for l, l_expected in zip(labels, expected_labels): + self.assertEqual(l.get_text(), l_expected) + + def test_format_timedelta_ticks_wide(self): + + expected_labels = [ + '00:00:00', + '1 days 03:46:40', + '2 days 07:33:20', + '3 days 11:20:00', + '4 days 15:06:40', + '5 days 18:53:20', + '6 days 22:40:00', + '8 days 02:26:40', + '' + ] + + rng = timedelta_range('0', periods=10, freq='1 d') + df = DataFrame(np.random.randn(len(rng), 3), rng) + ax = df.plot(fontsize=2) + fig = ax.get_figure() + fig.canvas.draw() + labels = ax.get_xticklabels() + self.assertEqual(len(labels), len(expected_labels)) + for l, l_expected in zip(labels, expected_labels): + self.assertEqual(l.get_text(), l_expected) + + def test_timedelta_plot(self): + # test issue #8711 + s = Series(range(5), timedelta_range('1day', periods=5)) + _check_plot_works(s.plot) + + # test 
long period + index = timedelta_range('1 day 2 hr 30 min 10 s', + periods=10, freq='1 d') + s = Series(np.random.randn(len(index)), index) + _check_plot_works(s.plot) + + # test short period + index = timedelta_range('1 day 2 hr 30 min 10 s', + periods=10, freq='1 ns') + s = Series(np.random.randn(len(index)), index) + _check_plot_works(s.plot) + def _check_plot_works(f, freq=None, series=None, *args, **kwargs): import matplotlib.pyplot as plt diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index b2050d7d8d81e..d46c38c117445 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1781,7 +1781,7 @@ def _ts_plot(cls, ax, x, data, style=None, **kwds): lines = cls._plot(ax, data.index, data.values, style=style, **kwds) # set date formatter, locators and rescale limits - format_dateaxis(ax, ax.freq) + format_dateaxis(ax, ax.freq, data.index) return lines def _get_stacking_id(self): diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py index 95ff9578fa3ee..db7049ebc89b3 100644 --- a/pandas/tseries/converter.py +++ b/pandas/tseries/converter.py @@ -1000,3 +1000,33 @@ def __call__(self, x, pos=0): else: fmt = self.formatdict.pop(x, '') return Period(ordinal=int(x), freq=self.freq).strftime(fmt) + + +class TimeSeries_TimedeltaFormatter(Formatter): + """ + Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`. 
+ """ + + @staticmethod + def format_timedelta_ticks(x, pos, n_decimals): + """ + Convert seconds to 'D days HH:MM:SS.F' + """ + s, ns = divmod(x, 1e9) + m, s = divmod(s, 60) + h, m = divmod(m, 60) + d, h = divmod(h, 24) + decimals = int(ns * 10**(n_decimals - 9)) + s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)) + if n_decimals > 0: + s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals) + if d != 0: + s = '{:d} days '.format(int(d)) + s + return s + + def __call__(self, x, pos=0): + (vmin, vmax) = tuple(self.axis.get_view_interval()) + n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin)))) + if n_decimals > 9: + n_decimals = 9 + return self.format_timedelta_ticks(x, pos, n_decimals) diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py index 89aecf2acc07e..4eddf54701889 100644 --- a/pandas/tseries/plotting.py +++ b/pandas/tseries/plotting.py @@ -12,11 +12,14 @@ from pandas.tseries.offsets import DateOffset import pandas.tseries.frequencies as frequencies from pandas.tseries.index import DatetimeIndex +from pandas.tseries.period import PeriodIndex +from pandas.tseries.tdi import TimedeltaIndex from pandas.formats.printing import pprint_thing import pandas.compat as compat from pandas.tseries.converter import (TimeSeries_DateLocator, - TimeSeries_DateFormatter) + TimeSeries_DateFormatter, + TimeSeries_TimedeltaFormatter) # --------------------------------------------------------------------- # Plotting functions and monkey patches @@ -49,7 +52,7 @@ def tsplot(series, plotf, ax=None, **kwargs): lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs) # set date formatter, locators and rescale limits - format_dateaxis(ax, ax.freq) + format_dateaxis(ax, ax.freq, series.index) return lines @@ -278,8 +281,24 @@ def _maybe_convert_index(ax, data): # Patch methods for subplot. Only format_dateaxis is currently used. # Do we need the rest for convenience? 
- -def format_dateaxis(subplot, freq): +def format_timedelta_ticks(x, pos, n_decimals): + """ + Convert seconds to 'D days HH:MM:SS.F' + """ + s, ns = divmod(x, 1e9) + m, s = divmod(s, 60) + h, m = divmod(m, 60) + d, h = divmod(h, 24) + decimals = int(ns * 10**(n_decimals - 9)) + s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)) + if n_decimals > 0: + s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals) + if d != 0: + s = '{:d} days '.format(int(d)) + s + return s + + +def format_dateaxis(subplot, freq, index): """ Pretty-formats the date axis (x-axis). @@ -288,26 +307,38 @@ def format_dateaxis(subplot, freq): default, changing the limits of the x axis will intelligently change the positions of the ticks. """ - majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, - minor_locator=False, - plot_obj=subplot) - minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, - minor_locator=True, - plot_obj=subplot) - subplot.xaxis.set_major_locator(majlocator) - subplot.xaxis.set_minor_locator(minlocator) - - majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, + + # handle index specific formatting + # Note: DatetimeIndex does not use this + # interface. 
DatetimeIndex uses matplotlib.date directly + if isinstance(index, PeriodIndex): + + majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot) - minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, + minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot) - subplot.xaxis.set_major_formatter(majformatter) - subplot.xaxis.set_minor_formatter(minformatter) - - # x and y coord info - subplot.format_coord = lambda t, y: ( - "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y)) + subplot.xaxis.set_major_locator(majlocator) + subplot.xaxis.set_minor_locator(minlocator) + + majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, + minor_locator=False, + plot_obj=subplot) + minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, + minor_locator=True, + plot_obj=subplot) + subplot.xaxis.set_major_formatter(majformatter) + subplot.xaxis.set_minor_formatter(minformatter) + + # x and y coord info + subplot.format_coord = lambda t, y: ( + "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y)) + + elif isinstance(index, TimedeltaIndex): + subplot.xaxis.set_major_formatter( + TimeSeries_TimedeltaFormatter()) + else: + raise TypeError('index type not supported') pylab.draw_if_interactive()
Instead of hacking the PeriodIndex plotting code to work for TimedeltaIndex, this just creates a simple formatter unique to TimedeltaIndex. It does fine for regular intervals up to a day, then the labels and run into eachothter. If this happens, you can use plt.autofmt_xdate() to fix it. - [x] closes #8711 - [ ] tests passed, none added - [x] passes ``git diff upstream/master | flake8 --diff`` ```python d.t_sensor_gyro_0__f_x.plot() plt.grid() ``` ![image](https://cloud.githubusercontent.com/assets/473772/21677741/76393d44-d309-11e6-90aa-b94fd4e2e989.png) ```python index = pd.timedelta_range('1 day 2 hr 30 min 10 s', periods=10, freq='1 d') s = pd.Series(np.random.randn(len(index)), index) s.plot() plt.gcf().autofmt_xdate() plt.grid() ``` ![image](https://cloud.githubusercontent.com/assets/473772/21677435/24779c86-d308-11e6-9350-2251a8eca334.png) ```python index = pd.timedelta_range('1 day 2 hr 30 min 10 s', periods=10, freq='1 ns') s = pd.Series(np.random.randn(len(index)), index) s.plot() plt.gcf().autofmt_xdate() plt.grid() ``` ![image](https://cloud.githubusercontent.com/assets/473772/21677429/1bd28794-d308-11e6-9ed6-72fa831a60f9.png) ```python index = pd.timedelta_range('0s', periods=10, freq='100000 ns') s = pd.Series(np.random.randn(len(index)), index) s.plot() plt.gcf().autofmt_xdate() plt.grid() plt.xlabel('time') plt.ylabel('y') ``` ![image](https://cloud.githubusercontent.com/assets/473772/21677938/7ef8392a-d30a-11e6-8c5c-f77cc0f49a7c.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/15067
2017-01-05T08:20:50Z
2017-02-22T16:06:33Z
null
2018-09-06T13:25:02Z
BUG: Patch missing data handling with usecols
diff --git a/doc/source/io.rst b/doc/source/io.rst index 9683fedb78303..dae97f7bc7f34 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1215,6 +1215,19 @@ You can elect to skip bad lines: 0 1 2 3 1 8 9 10 +You can also use the ``usecols`` parameter to eliminate extraneous column +data that appear in some lines but not others: + +.. code-block:: ipython + + In [30]: pd.read_csv(StringIO(data), usecols=[0, 1, 2]) + + Out[30]: + a b c + 0 1 2 3 + 1 4 5 6 + 2 8 9 10 + .. _io.quoting: Quoting and Escape Characters diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index c9ea7b427b3f2..ef731ad5e92df 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -306,6 +306,7 @@ Bug Fixes - Bug in ``pd.to_numeric()`` in which float and unsigned integer elements were being improperly casted (:issue:`14941`, :issue:`15005`) - Bug in ``pd.read_csv()`` in which the ``dialect`` parameter was not being verified before processing (:issue:`14898`) - Bug in ``pd.read_fwf`` where the skiprows parameter was not being respected during column width inference (:issue:`11256`) +- Bug in ``pd.read_csv()`` in which missing data was being improperly handled with ``usecols`` (:issue:`6710`) - Bug in ``DataFrame.loc`` with indexing a ``MultiIndex`` with a ``Series`` indexer (:issue:`14730`) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 41f1ab6fc16fb..f2c3113fc2cdd 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -2295,11 +2295,12 @@ def _infer_columns(self): columns = [lrange(ncols)] columns = self._handle_usecols(columns, columns[0]) else: - if self.usecols is None or len(names) == num_original_columns: + if self.usecols is None or len(names) >= num_original_columns: columns = self._handle_usecols([names], names) num_original_columns = len(names) else: - if self.usecols and len(names) != len(self.usecols): + if (not callable(self.usecols) and + len(names) != len(self.usecols)): raise ValueError( 
'Number of passed names did not match number of ' 'header fields in the file' diff --git a/pandas/io/tests/parser/usecols.py b/pandas/io/tests/parser/usecols.py index c654859f8dc7d..96790e872abc3 100644 --- a/pandas/io/tests/parser/usecols.py +++ b/pandas/io/tests/parser/usecols.py @@ -440,3 +440,28 @@ def test_callable_usecols(self): expected = DataFrame() df = self.read_csv(StringIO(s), usecols=lambda x: False) tm.assert_frame_equal(df, expected) + + def test_incomplete_first_row(self): + # see gh-6710 + data = '1,2\n1,2,3' + names = ['a', 'b', 'c'] + expected = DataFrame({'a': [1, 1], + 'c': [np.nan, 3]}) + + usecols = ['a', 'c'] + df = self.read_csv(StringIO(data), names=names, usecols=usecols) + tm.assert_frame_equal(df, expected) + + usecols = lambda x: x in ['a', 'c'] + df = self.read_csv(StringIO(data), names=names, usecols=usecols) + tm.assert_frame_equal(df, expected) + + def test_uneven_length_cols(self): + # see gh-8985 + usecols = [0, 1, 2] + data = '19,29,39\n' * 2 + '10,20,30,40' + expected = DataFrame([[19, 29, 39], + [19, 29, 39], + [10, 20, 30]]) + df = self.read_csv(StringIO(data), header=None, usecols=usecols) + tm.assert_frame_equal(df, expected) diff --git a/pandas/parser.pyx b/pandas/parser.pyx index c5082e999d19c..7b31f7fe27c1e 100644 --- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -1317,7 +1317,8 @@ cdef class TextReader: cdef _get_column_name(self, Py_ssize_t i, Py_ssize_t nused): if self.has_usecols and self.names is not None: - if len(self.names) == len(self.usecols): + if (not callable(self.usecols) and + len(self.names) == len(self.usecols)): return self.names[nused] else: return self.names[i - self.leading_cols]
Patch handling of cases when the first row of a CSV is incomplete and `usecols` is specified. Closes #6710. Closes #8985. xref #14782.
https://api.github.com/repos/pandas-dev/pandas/pulls/15066
2017-01-05T05:05:57Z
2017-01-11T07:57:21Z
2017-01-11T07:57:21Z
2017-01-11T08:02:49Z
testing another pytest commit
diff --git a/.gitignore b/.gitignore index a77e780f3332d..808d9fb73a631 100644 --- a/.gitignore +++ b/.gitignore @@ -56,6 +56,8 @@ dist **/wheelhouse/* # coverage .coverage +coverage.xml +coverage_html_report # OS generated files # ###################### diff --git a/.travis.yml b/.travis.yml index b2a1a8a63cfe6..bbc475e36f305 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,7 +32,7 @@ matrix: env: - PYTHON_VERSION=3.5 - JOB_NAME: "35_osx" - - NOSE_ARGS="not slow and not network and not disabled" + - NOSE_ARGS="--skip-slow --skip-network" - BUILD_TYPE=conda - JOB_TAG=_OSX - TRAVIS_PYTHON_VERSION=3.5 @@ -42,7 +42,7 @@ matrix: env: - PYTHON_VERSION=2.7 - JOB_NAME: "27_slow_nnet_LOCALE" - - NOSE_ARGS="slow and not network and not disabled" + - NOSE_ARGS="--skip-network" - LOCALE_OVERRIDE="zh_CN.UTF-8" - FULL_DEPS=true - JOB_TAG=_LOCALE @@ -56,7 +56,7 @@ matrix: env: - PYTHON_VERSION=2.7 - JOB_NAME: "27_nslow" - - NOSE_ARGS="not slow and not disabled" + - NOSE_ARGS="--skip-slow" - FULL_DEPS=true - CLIPBOARD_GUI=gtk2 - LINT=true @@ -70,7 +70,7 @@ matrix: env: - PYTHON_VERSION=3.5 - JOB_NAME: "35_nslow" - - NOSE_ARGS="not slow and not network and not disabled" + - NOSE_ARGS="--skip-slow --skip-network" - FULL_DEPS=true - CLIPBOARD=xsel - COVERAGE=true @@ -84,7 +84,7 @@ matrix: env: - PYTHON_VERSION=3.6 - JOB_NAME: "36" - - NOSE_ARGS="not slow and not network and not disabled" + - NOSE_ARGS="--skip-slow --skip-network" - PANDAS_TESTING_MODE="deprecate" addons: apt: @@ -96,7 +96,7 @@ matrix: env: - PYTHON_VERSION=2.7 - JOB_NAME: "27_nslow_nnet_COMPAT" - - NOSE_ARGS="not slow and not network and not disabled" + - NOSE_ARGS="--skip-slow --skip-network" - LOCALE_OVERRIDE="it_IT.UTF-8" - INSTALL_TEST=true - JOB_TAG=_COMPAT @@ -112,7 +112,7 @@ matrix: - PYTHON_VERSION=2.7 - JOB_NAME: "27_slow" - JOB_TAG=_SLOW - - NOSE_ARGS="slow and not network and not disabled" + - NOSE_ARGS="--skip-network" - FULL_DEPS=true - CACHE_NAME="27_slow" - USE_CACHE=true @@ -122,7 +122,7 @@ matrix: 
- PYTHON_VERSION=2.7 - JOB_NAME: "27_build_test_conda" - JOB_TAG=_BUILD_TEST - - NOSE_ARGS="not slow and not disabled" + - NOSE_ARGS="--skip-slow" - FULL_DEPS=true - BUILD_TEST=true - CACHE_NAME="27_build_test_conda" @@ -132,7 +132,7 @@ matrix: env: - PYTHON_VERSION=3.4 - JOB_NAME: "34_nslow" - - NOSE_ARGS="not slow and not disabled" + - NOSE_ARGS="--skip-slow" - FULL_DEPS=true - CLIPBOARD=xsel - CACHE_NAME="34_nslow" @@ -147,7 +147,7 @@ matrix: - PYTHON_VERSION=3.4 - JOB_NAME: "34_slow" - JOB_TAG=_SLOW - - NOSE_ARGS="slow and not network and not disabled" + - NOSE_ARGS="--skip-network" - FULL_DEPS=true - CLIPBOARD=xsel - CACHE_NAME="34_slow" @@ -162,7 +162,7 @@ matrix: - PYTHON_VERSION=3.5 - JOB_NAME: "35_numpy_dev" - JOB_TAG=_NUMPY_DEV - - NOSE_ARGS="not slow and not network and not disabled" + - NOSE_ARGS="--skip-slow --skip-network" - PANDAS_TESTING_MODE="deprecate" - CACHE_NAME="35_numpy_dev" - USE_CACHE=true @@ -177,7 +177,7 @@ matrix: - PYTHON_VERSION=3.5 - JOB_NAME: "35_ascii" - JOB_TAG=_ASCII - - NOSE_ARGS="not slow and not network and not disabled" + - NOSE_ARGS="--skip-slow --skip-network" - LOCALE_OVERRIDE="C" - CACHE_NAME="35_ascii" - USE_CACHE=true @@ -197,7 +197,7 @@ matrix: - PYTHON_VERSION=2.7 - JOB_NAME: "27_slow" - JOB_TAG=_SLOW - - NOSE_ARGS="slow and not network and not disabled" + - NOSE_ARGS=--skip-network" - FULL_DEPS=true - CACHE_NAME="27_slow" - USE_CACHE=true @@ -206,7 +206,7 @@ matrix: - PYTHON_VERSION=3.4 - JOB_NAME: "34_slow" - JOB_TAG=_SLOW - - NOSE_ARGS="slow and not network and not disabled" + - NOSE_ARGS=--skip-network" - FULL_DEPS=true - CLIPBOARD=xsel - CACHE_NAME="34_slow" @@ -220,7 +220,7 @@ matrix: - PYTHON_VERSION=2.7 - JOB_NAME: "27_build_test_conda" - JOB_TAG=_BUILD_TEST - - NOSE_ARGS="not slow and not disabled" + - NOSE_ARGS="--skip-slow" - FULL_DEPS=true - BUILD_TEST=true - CACHE_NAME="27_build_test_conda" @@ -229,7 +229,7 @@ matrix: env: - PYTHON_VERSION=3.4 - JOB_NAME: "34_nslow" - - NOSE_ARGS="not slow and not 
disabled" + - NOSE_ARGS="--skip-slow" - FULL_DEPS=true - CLIPBOARD=xsel - CACHE_NAME="34_nslow" @@ -243,7 +243,7 @@ matrix: - PYTHON_VERSION=3.5 - JOB_NAME: "35_numpy_dev" - JOB_TAG=_NUMPY_DEV - - NOSE_ARGS="not slow and not network and not disabled" + - NOSE_ARGS="--skip-slow --skip-network" - PANDAS_TESTING_MODE="deprecate" - CACHE_NAME="35_numpy_dev" - USE_CACHE=true @@ -256,7 +256,7 @@ matrix: env: - PYTHON_VERSION=2.7 - JOB_NAME: "27_nslow_nnet_COMPAT" - - NOSE_ARGS="not slow and not network and not disabled" + - NOSE_ARGS="--skip-slow --skip-network" - LOCALE_OVERRIDE="it_IT.UTF-8" - INSTALL_TEST=true - JOB_TAG=_COMPAT @@ -271,7 +271,7 @@ matrix: - PYTHON_VERSION=3.5 - JOB_NAME: "35_ascii" - JOB_TAG=_ASCII - - NOSE_ARGS="not slow and not network and not disabled" + - NOSE_ARGS="--skip-slow --skip-network" - LOCALE_OVERRIDE="C" - CACHE_NAME="35_ascii" - USE_CACHE=true diff --git a/appveyor.yml b/appveyor.yml index a8e5218ab2c9f..9b122cc04d554 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -93,7 +93,7 @@ install: - cmd: '%CMD_IN_ENV% conda build ci\appveyor.recipe -q' # create our env - - cmd: conda create -q -n pandas python=%PYTHON_VERSION% nose + - cmd: conda create -q -n pandas python=%PYTHON_VERSION% nose pytest - cmd: activate pandas - SET REQ=ci\requirements-%PYTHON_VERSION%-%PYTHON_ARCH%.run - cmd: echo "installing requirements from %REQ%" @@ -105,4 +105,4 @@ test_script: - cd \ - cmd: activate pandas - cmd: conda list - - cmd: nosetests --exe -A "not slow and not network and not disabled" pandas + - cmd: pytest --skip-slow --skip-network -c %APPVEYOR_BUILD_FOLDER% pandas diff --git a/ci/install_appveyor.ps1 b/ci/install_appveyor.ps1 index a022995dc7d58..f0826f25c63a0 100644 --- a/ci/install_appveyor.ps1 +++ b/ci/install_appveyor.ps1 @@ -126,8 +126,8 @@ function UpdateConda ($python_home) { function main () { InstallMiniconda $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON UpdateConda $env:PYTHON - InstallCondaPackages $env:PYTHON "pip setuptools 
nose" + InstallCondaPackages $env:PYTHON "pip setuptools nose pytest" InstallCondaPackagesFromFile $env:PYTHON $env:PYTHON_VERSION $env:PYTHON_ARCH } -main \ No newline at end of file +main diff --git a/ci/install_test.sh b/ci/install_test.sh index e01ad7b94a349..9bfe817df4d49 100755 --- a/ci/install_test.sh +++ b/ci/install_test.sh @@ -8,7 +8,8 @@ if [ "$INSTALL_TEST" ]; then conda uninstall cython || exit 1 python "$TRAVIS_BUILD_DIR"/setup.py sdist --formats=zip,gztar || exit 1 pip install "$TRAVIS_BUILD_DIR"/dist/*tar.gz || exit 1 - nosetests --exe -A "$NOSE_ARGS" pandas/tests/test_series.py --with-xunit --xunit-file=/tmp/nosetests_install.xml + # nosetests --exe -A "$NOSE_ARGS" pandas/tests/test_series.py --with-xunit --xunit-file=/tmp/nosetests_install.xml + pytest pandas/tests/test_series.py --junitxml=/tmp/pytest_install.xml else echo "Skipping installation test." fi diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 542d22d9fa871..3eb0a07eecacf 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -82,6 +82,7 @@ else # Useful for debugging any issues with conda conda info -a || exit 1 + fi # may have installation instructions for this build @@ -91,10 +92,10 @@ if [ -e ${INSTALL} ]; then else # create new env - time conda create -n pandas python=$PYTHON_VERSION nose || exit 1 + time conda create -n pandas python=$PYTHON_VERSION nose pytest || exit 1 if [ "$COVERAGE" ]; then - pip install coverage + pip install coverage pytest-cov fi if [ "$LINT" ]; then conda install flake8 diff --git a/ci/requirements_all.txt b/ci/requirements_all.txt index bc97957bff2b7..b64143fcd4ecd 100644 --- a/ci/requirements_all.txt +++ b/ci/requirements_all.txt @@ -1,4 +1,6 @@ nose +pytest +pytest-cov flake8 sphinx ipython diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt index 7396fba6548d9..b8af9d035de98 100644 --- a/ci/requirements_dev.txt +++ b/ci/requirements_dev.txt @@ -3,4 +3,6 @@ pytz numpy cython nose +pytest +pytest-cov flake8 diff --git 
a/ci/script.sh b/ci/script.sh index e2ba883b81883..d024ab74d1c38 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -20,11 +20,23 @@ fi if [ "$BUILD_TEST" ]; then echo "We are not running nosetests as this is simply a build test." elif [ "$COVERAGE" ]; then - echo nosetests --exe -A "$NOSE_ARGS" pandas --with-coverage --with-xunit --xunit-file=/tmp/nosetests.xml - nosetests --exe -A "$NOSE_ARGS" pandas --with-coverage --cover-package=pandas --cover-tests --with-xunit --xunit-file=/tmp/nosetests.xml + echo pytest -s --cov=pandas --cov-report xml:/tmp/nosetests.xml $NOSE_ARGS pandas + pytest -s --cov=pandas --cov-report xml:/tmp/nosetests.xml $NOSE_ARGS pandas else - echo nosetests --exe -A "$NOSE_ARGS" pandas --doctest-tests --with-xunit --xunit-file=/tmp/nosetests.xml - nosetests --exe -A "$NOSE_ARGS" pandas --doctest-tests --with-xunit --xunit-file=/tmp/nosetests.xml + # XXX debugging + echo nosetests pandas/io/tests/test_stata.py + nosetests pandas/io/tests/test_stata.py + + echo pytest $NOSE_ARGS pandas + pytest $NOSE_ARGS pandas # TODO: doctest + + # XXX debugging + echo nosetests pandas/io/tests/test_stata.py + nosetests pandas/io/tests/test_stata.py + echo nosetests pandas/tools/tests/test_util.py + nosetests pandas/tools/tests/test_util.py + echo nosetests pandas/tseries/tests/test_timezones.py + nosetests pandas/tseries/tests/test_timezones.py fi RET="$?" 
diff --git a/conftest.py b/conftest.py new file mode 100644 index 0000000000000..bea77128ff915 --- /dev/null +++ b/conftest.py @@ -0,0 +1,21 @@ +import pytest + + +def pytest_addoption(parser): + parser.addoption("--skip-slow", action="store_true", + help="skip slow tests") + parser.addoption("--skip-network", action="store_true", + help="run network tests") + parser.addoption("--run-disabled", action="store_false", + help="run disabled tests") + + +def pytest_runtest_setup(item): + if 'slow' in item.keywords and item.config.getoption("--skip-slow"): + pytest.skip("skipping due to --skip-slow") + + if 'skip' in item.keywords and item.config.getoption("--skip-network"): + pytest.skip("skipping due to --skip-network") + + if 'disabled' in item.keywords and item.config.getoption("--run-disabled"): + pytest.skip("need --run-disabled option to run") diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index ecc2a5e723c45..0c76696c855a9 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -552,8 +552,8 @@ use cases and writing corresponding tests. Adding tests is one of the most common requests after code is pushed to *pandas*. Therefore, it is worth getting in the habit of writing tests ahead of time so this is never an issue. -Like many packages, *pandas* uses the `Nose testing system -<https://nose.readthedocs.io/en/latest/index.html>`_ and the convenient +Like many packages, *pandas* uses `pytest` +<http://doc.pytest.org/en/latest/>`_ and the convenient extensions in `numpy.testing <http://docs.scipy.org/doc/numpy/reference/routines.testing.html>`_. @@ -595,15 +595,15 @@ Running the test suite The tests can then be run directly inside your Git clone (without having to install *pandas*) by typing:: - nosetests pandas + pytest pandas The tests suite is exhaustive and takes around 20 minutes to run. Often it is worth running only a subset of tests first around your changes before running the entire suite. 
This is done using one of the following constructs:: - nosetests pandas/tests/[test-module].py - nosetests pandas/tests/[test-module].py:[TestClass] - nosetests pandas/tests/[test-module].py:[TestClass].[test_method] + pytest pandas/tests/[test-module].py + pytest pandas/tests/[test-module].py::[TestClass] + pytest pandas/tests/[test-module].py::[TestClass]::[test_method] .. versionadded:: 0.18.0 diff --git a/doc/source/install.rst b/doc/source/install.rst index 4787b3356ee9f..5070f73f6a93e 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -188,8 +188,8 @@ Running the test suite pandas is equipped with an exhaustive set of unit tests covering about 97% of the codebase as of this writing. To run it on your machine to verify that everything is working (and you have all of the dependencies, soft and hard, -installed), make sure you have `nose -<https://nose.readthedocs.io/en/latest/>`__ and run: +installed), make sure you have `pytest +<http://doc.pytest.org/en/latest/>`__ and run: :: diff --git a/pandas/__init__.py b/pandas/__init__.py index 2d91c97144e3c..e8c01c6e08228 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -51,12 +51,7 @@ from pandas.tools.util import to_numeric from pandas.core.reshape import melt from pandas.util.print_versions import show_versions - -# define the testing framework -import pandas.util.testing -from pandas.util.nosetester import NoseTester -test = NoseTester().test -del NoseTester +from pandas.api.test import test # use the closest tagged version if possible from ._version import get_versions diff --git a/pandas/api/test.py b/pandas/api/test.py new file mode 100644 index 0000000000000..e76b95ea29bce --- /dev/null +++ b/pandas/api/test.py @@ -0,0 +1,19 @@ +""" +Entrypoint for testing from the top-level namespace +""" +import os + +PKG = os.path.dirname(os.path.dirname(__file__)) + + +try: + import pytest +except ImportError: + def test(): + raise ImportError("Need pytest>=3.0 to run tests") +else: + def 
test(): + pytest.main(['-q', PKG]) + + +__all__ = ['test'] diff --git a/pandas/api/tests/test_api.py b/pandas/api/tests/test_api.py index b13b4d7de60ca..93a4479f67080 100644 --- a/pandas/api/tests/test_api.py +++ b/pandas/api/tests/test_api.py @@ -135,7 +135,7 @@ def test_api(self): class TestApi(Base, tm.TestCase): - allowed = ['tests', 'types'] + allowed = ['tests', 'types', 'test'] def test_api(self): @@ -229,6 +229,6 @@ def test_deprecation_access_obj(self): pd.datetools.monthEnd if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/computation/tests/test_compat.py b/pandas/computation/tests/test_compat.py index 80b415739c647..aa9bdfcce420e 100644 --- a/pandas/computation/tests/test_compat.py +++ b/pandas/computation/tests/test_compat.py @@ -65,5 +65,6 @@ def testit(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py index ffa2cb0684b72..77d3c84437682 100644 --- a/pandas/computation/tests/test_eval.py +++ b/pandas/computation/tests/test_eval.py @@ -1972,5 +1972,6 @@ def test_negate_lt_eq_le(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d96fb094f5d5c..128fe1b02320a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5726,6 +5726,6 @@ def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0, ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs) if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', 
'--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/json/test_json_norm.py b/pandas/io/tests/json/test_json_norm.py index 36110898448ea..23649279aba89 100644 --- a/pandas/io/tests/json/test_json_norm.py +++ b/pandas/io/tests/json/test_json_norm.py @@ -1,5 +1,3 @@ -import nose - from pandas import DataFrame import numpy as np import json @@ -285,5 +283,6 @@ def test_json_normalize_errors(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', - '--pdb-failure', '-s'], exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/json/test_pandas.py b/pandas/io/tests/json/test_pandas.py index d7f903153fdae..473a053b383f7 100644 --- a/pandas/io/tests/json/test_pandas.py +++ b/pandas/io/tests/json/test_pandas.py @@ -1017,5 +1017,6 @@ def roundtrip(s, encoding='latin-1'): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', - '--pdb-failure', '-s'], exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/json/test_ujson.py b/pandas/io/tests/json/test_ujson.py index 704023bd847b7..4a1f0e2666084 100644 --- a/pandas/io/tests/json/test_ujson.py +++ b/pandas/io/tests/json/test_ujson.py @@ -1614,5 +1614,6 @@ def _clean_dict(d): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/parser/test_network.py b/pandas/io/tests/parser/test_network.py index 8e71cf1cc7e4c..8912483f3681f 100644 --- a/pandas/io/tests/parser/test_network.py +++ b/pandas/io/tests/parser/test_network.py @@ -183,5 +183,6 @@ def test_s3_fails(self): read_csv('s3://cant_get_it/') if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + 
pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/parser/test_parsers.py b/pandas/io/tests/parser/test_parsers.py index a90f546d37fc8..0d26e6739a727 100644 --- a/pandas/io/tests/parser/test_parsers.py +++ b/pandas/io/tests/parser/test_parsers.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- import os -import nose import pandas.util.testing as tm @@ -101,5 +100,6 @@ def read_table(self, *args, **kwds): return read_table(*args, **kwds) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/parser/test_textreader.py b/pandas/io/tests/parser/test_textreader.py index 98cb09cd85480..ce1221683ea38 100644 --- a/pandas/io/tests/parser/test_textreader.py +++ b/pandas/io/tests/parser/test_textreader.py @@ -10,7 +10,6 @@ import os import sys -import nose from numpy import nan import numpy as np @@ -404,5 +403,6 @@ def assert_array_dicts_equal(left, right): assert(np.array_equal(v, right[k])) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/parser/test_unsupported.py b/pandas/io/tests/parser/test_unsupported.py index 64f31a11440d8..0a89ac570a564 100644 --- a/pandas/io/tests/parser/test_unsupported.py +++ b/pandas/io/tests/parser/test_unsupported.py @@ -9,8 +9,6 @@ test suite as new feature support is added to the parsers. 
""" -import nose - import pandas.io.parsers as parsers import pandas.util.testing as tm @@ -140,5 +138,6 @@ def test_deprecated_args(self): **kwargs) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/test_date_converters.py b/pandas/io/tests/test_date_converters.py index 3a0dd4eaa09e5..65f2c8d8aa73d 100644 --- a/pandas/io/tests/test_date_converters.py +++ b/pandas/io/tests/test_date_converters.py @@ -1,8 +1,6 @@ from pandas.compat import StringIO from datetime import date, datetime -import nose - import numpy as np from pandas import DataFrame, MultiIndex @@ -152,5 +150,6 @@ def test_parse_date_column_with_empty_string(self): assert_frame_equal(result, expected) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 8db0e6202f7fc..ed506c5e54480 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -2314,5 +2314,6 @@ def check_called(func): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/test_ga.py b/pandas/io/tests/test_ga.py index 469e121f633d7..d2b9e58a80c39 100644 --- a/pandas/io/tests/test_ga.py +++ b/pandas/io/tests/test_ga.py @@ -194,5 +194,6 @@ def test_segment(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py index 8a414dcd3ba4f..8f6b179860db3 100644 --- a/pandas/io/tests/test_gbq.py +++ 
b/pandas/io/tests/test_gbq.py @@ -1226,5 +1226,6 @@ def test_upload_data_as_service_account_with_key_contents(self): self.assertEqual(result['NUM_ROWS'][0], test_size) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py index f4eec864da572..d95ee2d8b443a 100644 --- a/pandas/io/tests/test_html.py +++ b/pandas/io/tests/test_html.py @@ -920,5 +920,6 @@ def test_same_ordering(): assert_framelist_equal(dfs_lxml, dfs_bs4) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py index 63c2ffc629ca6..5c284840ad910 100644 --- a/pandas/io/tests/test_packers.py +++ b/pandas/io/tests/test_packers.py @@ -795,18 +795,19 @@ class TestMsgpack(): http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class """ - def setUp(self): + @classmethod + def setup_class(cls): from pandas.io.tests.generate_legacy_storage_files import ( create_msgpack_data, create_data) - self.data = create_msgpack_data() - self.all_data = create_data() - self.path = u('__%s__.msgpack' % tm.rands(10)) - self.minimum_structure = {'series': ['float', 'int', 'mixed', - 'ts', 'mi', 'dup'], - 'frame': ['float', 'int', 'mixed', 'mi'], - 'panel': ['float'], - 'index': ['int', 'date', 'period'], - 'mi': ['reg2']} + cls.data = create_msgpack_data() + cls.all_data = create_data() + cls.path = u('__%s__.msgpack' % tm.rands(10)) + cls.minimum_structure = {'series': ['float', 'int', 'mixed', + 'ts', 'mi', 'dup'], + 'frame': ['float', 'int', 'mixed', 'mi'], + 'panel': ['float'], + 'index': ['int', 'date', 'period'], + 'mi': ['reg2']} def check_min_structure(self, data): for typ, v in 
self.minimum_structure.items(): diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index a49f50b1bcb9f..b258afcfad4b1 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -32,11 +32,12 @@ class TestPickle(): """ _multiprocess_can_split_ = True - def setUp(self): + @classmethod + def setup_class(cls): from pandas.io.tests.generate_legacy_storage_files import ( create_pickle_data) - self.data = create_pickle_data() - self.path = u('__%s__.pickle' % tm.rands(10)) + cls.data = create_pickle_data() + cls.path = u('__%s__.pickle' % tm.rands(10)) def compare_element(self, result, expected, typ, version=None): if isinstance(expected, Index): @@ -286,6 +287,6 @@ def test_pickle_v0_15_2(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - # '--with-coverage', '--cover-package=pandas.core'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index b23d0b89fe850..28c179c5a2c25 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -5514,6 +5514,6 @@ def _test_sort(obj): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/test_s3.py b/pandas/io/tests/test_s3.py index 8058698a906ea..2877d5b36e15e 100644 --- a/pandas/io/tests/test_s3.py +++ b/pandas/io/tests/test_s3.py @@ -1,4 +1,3 @@ -import nose from pandas.util import testing as tm from pandas.io.common import _is_s3_url @@ -10,5 +9,6 @@ def test_is_s3_url(self): self.assertFalse(_is_s3_url("s4://pandas/somethingelse.com")) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) 
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index cb08944e8dc57..1e93f5d6763e6 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -236,7 +236,7 @@ def _close_conn(self): pass -class PandasSQLTest(unittest.TestCase): +class PandasSQLTest(object): """ Base class with common private methods for SQLAlchemy and fallback cases. @@ -839,7 +839,7 @@ def test_unicode_column_name(self): df.to_sql('test_unicode', self.conn, index=False) -class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi): +class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi, unittest.TestCase): """ Test the public API as it would be used directly @@ -1024,11 +1024,11 @@ def tearDown(self): super(_EngineToConnMixin, self).tearDown() -class TestSQLApiConn(_EngineToConnMixin, TestSQLApi): +class TestSQLApiConn(_EngineToConnMixin, TestSQLApi, unittest.TestCase): pass -class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi): +class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi, unittest.TestCase): """ Test the public sqlite connection fallback API @@ -1875,34 +1875,39 @@ def test_schema_support(self): tm.assert_frame_equal(res1, res2) -class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy): +class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy, unittest.TestCase): pass -class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn): +class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn, + unittest.TestCase): pass -class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy): +class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy, + unittest.TestCase): pass -class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn): +class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn, + unittest.TestCase): pass -class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy): +class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy, + unittest.TestCase): pass -class 
TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn): +class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn, + unittest.TestCase): pass # ----------------------------------------------------------------------------- # -- Test Sqlite / MySQL fallback -class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest): +class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest, unittest.TestCase): """ Test the fallback mode against an in-memory sqlite database. @@ -2661,5 +2666,6 @@ def clean_up(test_table_to_drop): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index cd972868a6e32..9d634f2b1ec74 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -1281,5 +1281,6 @@ def test_out_of_range_float(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/rpy/tests/test_common.py b/pandas/rpy/tests/test_common.py index c3f09e21b1545..9f8a0b4dfbc8f 100644 --- a/pandas/rpy/tests/test_common.py +++ b/pandas/rpy/tests/test_common.py @@ -211,6 +211,6 @@ def test_factor(self): assert np.equal(result, factors) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - # '--with-coverage', '--cover-package=pandas.core'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/sparse/tests/test_array.py b/pandas/sparse/tests/test_array.py index 592926f8e821d..3e9f43db3d3a2 100644 --- a/pandas/sparse/tests/test_array.py +++ b/pandas/sparse/tests/test_array.py @@ -813,6 +813,6 @@ def test_ufunc_args(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', 
'--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/sparse/tests/test_combine_concat.py b/pandas/sparse/tests/test_combine_concat.py index fcdc6d9580dd5..369059d432109 100644 --- a/pandas/sparse/tests/test_combine_concat.py +++ b/pandas/sparse/tests/test_combine_concat.py @@ -360,5 +360,6 @@ def test_concat_sparse_dense(self): if __name__ == '__main__': import nose # noqa - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/sparse/tests/test_frame.py b/pandas/sparse/tests/test_frame.py index 83b6a89811ee6..511dec405ed14 100644 --- a/pandas/sparse/tests/test_frame.py +++ b/pandas/sparse/tests/test_frame.py @@ -1151,6 +1151,6 @@ def test_numpy_func_call(self): getattr(np, func)(self.frame) if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/sparse/tests/test_libsparse.py b/pandas/sparse/tests/test_libsparse.py index c289b4a1b204f..b7e4e83201115 100644 --- a/pandas/sparse/tests/test_libsparse.py +++ b/pandas/sparse/tests/test_libsparse.py @@ -589,5 +589,6 @@ def f(self): if __name__ == '__main__': import nose # noqa - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/sparse/tests/test_list.py b/pandas/sparse/tests/test_list.py index b117685b6e968..5a5b12f867529 100644 --- a/pandas/sparse/tests/test_list.py +++ b/pandas/sparse/tests/test_list.py @@ -115,6 +115,6 @@ def test_getitem(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git 
a/pandas/sparse/tests/test_series.py b/pandas/sparse/tests/test_series.py index 14339ab388a5d..607c85060cc4b 100644 --- a/pandas/sparse/tests/test_series.py +++ b/pandas/sparse/tests/test_series.py @@ -1369,6 +1369,6 @@ def test_numpy_func_call(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/stats/tests/test_fama_macbeth.py b/pandas/stats/tests/test_fama_macbeth.py index 706becfa730c4..c25f60ba000db 100644 --- a/pandas/stats/tests/test_fama_macbeth.py +++ b/pandas/stats/tests/test_fama_macbeth.py @@ -68,6 +68,6 @@ def _check_stuff_works(self, result): result.summary if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/stats/tests/test_math.py b/pandas/stats/tests/test_math.py index bc09f33d2f467..3fe8eb3a154b8 100644 --- a/pandas/stats/tests/test_math.py +++ b/pandas/stats/tests/test_math.py @@ -59,5 +59,6 @@ def test_inv_illformed(self): self.assertTrue(np.allclose(rs, expected)) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py index 6f688649affb0..ed940c004592e 100644 --- a/pandas/stats/tests/test_ols.py +++ b/pandas/stats/tests/test_ols.py @@ -977,6 +977,6 @@ def tsAssertEqual(self, ts1, ts2, **kwargs): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/formats/test_format.py b/pandas/tests/formats/test_format.py index 00e5e002ca48d..38ef82e54737a 100644 
--- a/pandas/tests/formats/test_format.py +++ b/pandas/tests/formats/test_format.py @@ -4454,5 +4454,6 @@ def test_format_percentiles(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/formats/test_printing.py b/pandas/tests/formats/test_printing.py index 3bcceca1f50a7..8b89118ffb58b 100644 --- a/pandas/tests/formats/test_printing.py +++ b/pandas/tests/formats/test_printing.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -import nose from pandas import compat import pandas.formats.printing as printing import pandas.formats.format as fmt @@ -138,5 +137,6 @@ def test_ambiguous_width(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/frame/common.py b/pandas/tests/frame/common.py index 37f67712e1b58..b9cd764c8704c 100644 --- a/pandas/tests/frame/common.py +++ b/pandas/tests/frame/common.py @@ -89,11 +89,11 @@ def empty(self): @cache_readonly def ts1(self): - return tm.makeTimeSeries() + return tm.makeTimeSeries(nper=30) @cache_readonly def ts2(self): - return tm.makeTimeSeries()[5:] + return tm.makeTimeSeries(nper=30)[5:] @cache_readonly def simple(self): diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index f6081e14d4081..5ae9adaf6c45e 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -2187,5 +2187,6 @@ def test_dot(self): df.dot(df2) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/frame/test_asof.py b/pandas/tests/frame/test_asof.py index 6c15c75cb5427..0349803d6a37e 100644 --- a/pandas/tests/frame/test_asof.py 
+++ b/pandas/tests/frame/test_asof.py @@ -1,7 +1,5 @@ # coding=utf-8 -import nose - import numpy as np from pandas import DataFrame, date_range @@ -68,5 +66,6 @@ def test_subset(self): assert_frame_equal(result, expected) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index bf0fabaf3e402..4628ef4b175fa 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1923,5 +1923,6 @@ def test_from_index(self): if __name__ == '__main__': import nose # noqa - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 720dcdd62dd89..c341bffefaeb6 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -2774,6 +2774,6 @@ def test_transpose(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/frame/test_misc_api.py b/pandas/tests/frame/test_misc_api.py index bc750727493a3..2ab34b5059fee 100644 --- a/pandas/tests/frame/test_misc_api.py +++ b/pandas/tests/frame/test_misc_api.py @@ -4,7 +4,6 @@ # pylint: disable-msg=W0612,E1101 from copy import deepcopy import sys -import nose from distutils.version import LooseVersion from pandas.compat import range, lrange @@ -493,5 +492,6 @@ def _check_f(base, f): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git 
a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 8a6cbe44465c1..9df98358031f3 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -660,7 +660,6 @@ def test_interp_ignore_all_good(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - # '--with-coverage', '--cover-package=pandas.core'] - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 8462d5cd9bcf6..47030b1559a59 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -1261,5 +1261,6 @@ def test_alignment_non_pandas(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 36ae5dac733a5..e2153fb74433d 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -1158,5 +1158,6 @@ def setUpClass(cls): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index c6c3b4f43b55a..411f1726cede4 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -383,6 +383,6 @@ def test_datetime_assignment_with_NaT_and_diff_time_units(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py 
index 1eb3454519ce3..9075f0b52639b 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -1148,6 +1148,6 @@ def test_to_csv_quoting(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py index 6b162b71f79de..5231b43421dd1 100644 --- a/pandas/tests/groupby/test_aggregate.py +++ b/pandas/tests/groupby/test_aggregate.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- from __future__ import print_function -import nose from datetime import datetime @@ -490,5 +489,5 @@ def testit(label_list, shape): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure', '-s' - ], exit=False) + import pytest + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 99bea3a10115b..98833bfb079d5 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- from __future__ import print_function -import nose from numpy import nan @@ -473,5 +472,5 @@ def testit(label_list, shape): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure', '-s' - ], exit=False) + import pytest + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py index fb0f52886ec31..b1ee49f9bec99 100644 --- a/pandas/tests/groupby/test_filters.py +++ b/pandas/tests/groupby/test_filters.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- from __future__ import print_function -import nose from numpy import nan @@ -644,5 +643,6 @@ def testit(label_list, shape): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', 
'--pdb', '--pdb-failure', '-s' - ], exit=False) + import pytest + pytest.main([__file__, '-vvs', '-x', '--pdb']) + diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index e87b5d04271e8..0c562855e102f 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -5812,5 +5812,6 @@ def testit(label_list, shape): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure', '-s' - ], exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/indexing/test_callable.py b/pandas/tests/indexing/test_callable.py index 3465d776bfa85..27260c2a43274 100644 --- a/pandas/tests/indexing/test_callable.py +++ b/pandas/tests/indexing/test_callable.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # pylint: disable-msg=W0612,E1101 -import nose - import numpy as np import pandas as pd import pandas.util.testing as tm @@ -271,5 +269,6 @@ def test_frame_iloc_callable_setitem(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 4e5558309bad5..bc08dcc5acbf2 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -5433,5 +5433,6 @@ def test_none_coercion_mixed_dtypes(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index 0916693ade2ce..bc4c0fbea8fff 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -372,5 +372,6 @@ def test_grouped_box_multiple_axes(self): if 
__name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index f07aadba175f2..8eb24da8f83da 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1312,5 +1312,6 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 87cf89ebf0a9d..8a5935d4428d4 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -2730,5 +2730,6 @@ def _generate_4_axes_via_gridspec(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/plotting/test_groupby.py b/pandas/tests/plotting/test_groupby.py index 101a6556c61bf..5d7f70d1e21da 100644 --- a/pandas/tests/plotting/test_groupby.py +++ b/pandas/tests/plotting/test_groupby.py @@ -1,8 +1,6 @@ #!/usr/bin/env python # coding: utf-8 -import nose - from pandas import Series, DataFrame import pandas.util.testing as tm @@ -78,5 +76,6 @@ def test_plot_kwargs(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index c7bff5a31fc02..1a569b2c030ca 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -1,8 +1,6 @@ 
#!/usr/bin/env python # coding: utf-8 -import nose - from pandas import Series, DataFrame import pandas.util.testing as tm from pandas.util.testing import slow @@ -422,5 +420,6 @@ def test_axis_share_xy(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 6c313f5937602..8dbb275f3d512 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -1,8 +1,6 @@ #!/usr/bin/env python # coding: utf-8 -import nose - from pandas import Series, DataFrame from pandas.compat import lmap import pandas.util.testing as tm @@ -302,5 +300,6 @@ def test_subplot_titles(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 6878ca0e1bc06..5bc46219dccbb 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -1,7 +1,6 @@ #!/usr/bin/env python # coding: utf-8 -import nose import itertools from datetime import datetime @@ -815,5 +814,6 @@ def test_custom_business_day_freq(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/series/test_asof.py b/pandas/tests/series/test_asof.py index e2092feab9004..647fc9f133e8d 100644 --- a/pandas/tests/series/test_asof.py +++ b/pandas/tests/series/test_asof.py @@ -1,7 +1,5 @@ # coding=utf-8 -import nose - import numpy as np from pandas import (offsets, Series, notnull, @@ -154,5 +152,6 @@ def test_errors(self): s.asof(s.index[0], subset='foo') if __name__ == '__main__': - 
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index c44a7a898bb8d..eaa64f58c8022 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -1982,6 +1982,6 @@ def test_setitem_slice_into_readonly_backing_data(self): ) if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 3c82e4ed82969..9d817b45aa80c 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -933,7 +933,6 @@ def test_interp_timedelta64(self): assert_series_equal(result, expected) if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - # '--with-coverage', '--cover-package=pandas.core'] - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 6e3d52366a4ec..dc21582d1644f 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -563,6 +563,6 @@ def test_empty_series_ops(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 75dd887c9d290..303f31be112bf 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1345,6 +1345,6 @@ def test_index(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', 
'--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 717eae3e59715..7375a61475f79 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -5,6 +5,7 @@ import sys from datetime import datetime, timedelta +import nose import numpy as np import pandas as pd @@ -1101,8 +1102,6 @@ def f(): if __name__ == '__main__': - import nose + import pytest - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - # '--with-coverage', '--cover-package=pandas.core'], - exit=False) + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 23280395427fd..1ccc1c2ef1dfb 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -4542,7 +4542,6 @@ def test_map(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - # '--with-coverage', '--cover-package=pandas.core'] - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 09dd3f7ab517c..db9827098bf74 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -import nose import numpy as np from pandas import Series, Timestamp @@ -199,5 +198,6 @@ def test_dict_compat(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index c037f02f20609..173a304e145a6 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -442,6 +442,6 @@ def test_bool_ops_warn_on_arithmetic(self): if __name__ == '__main__': - import nose - 
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 3500ce913462a..bc14d9fa898bc 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -1987,5 +1987,6 @@ def test_pipe_panel(self): result = wp.pipe((f, 'y'), x=1, y=1) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index 32e8f44e6f258..953a64d58d407 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -1183,5 +1183,6 @@ def assert_add_equals(val, inc, result): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_join.py b/pandas/tests/test_join.py index bfdb77f3fb350..4f3673d26f5ec 100644 --- a/pandas/tests/test_join.py +++ b/pandas/tests/test_join.py @@ -196,6 +196,6 @@ def test_inner_join_indexer2(): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py index 945f8004687cd..794c6355b31fc 100644 --- a/pandas/tests/test_lib.py +++ b/pandas/tests/test_lib.py @@ -235,7 +235,6 @@ def test_empty_like(self): if __name__ == '__main__': - import nose + import pytest - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 4e7ace4173227..1f2a799f580f3 100755 --- 
a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -2450,5 +2450,6 @@ def test_iloc_mi(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index dd3a49de55d73..e5e7dcfb38953 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -1003,6 +1003,6 @@ def prng(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure', '-s' - ], exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 9cb2dd5a40ac4..853a4346251a0 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -2541,5 +2541,6 @@ def test_panel_index(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index 1b5a7b6ee1e83..3504b9315bd1e 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -951,5 +951,6 @@ def test_get_attr(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_panelnd.py b/pandas/tests/test_panelnd.py index 2a69a65e8d55e..7dbe541103773 100644 --- a/pandas/tests/test_panelnd.py +++ b/pandas/tests/test_panelnd.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -import nose - from pandas.core import panelnd from pandas.core.panel import Panel @@ -103,5 +101,6 @@ def test_5d_construction(self): # expected = if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', 
'--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py index 603674ac01bc0..c728ea6421c49 100644 --- a/pandas/tests/test_reshape.py +++ b/pandas/tests/test_reshape.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # pylint: disable-msg=W0612,E1101 -import nose - from pandas import DataFrame, Series from pandas.core.sparse import SparseDataFrame import pandas as pd @@ -917,5 +915,6 @@ def test_multiple_id_columns(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_stats.py b/pandas/tests/test_stats.py index 41d25b9662b5b..12fb71128f23b 100644 --- a/pandas/tests/test_stats.py +++ b/pandas/tests/test_stats.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- from pandas import compat -import nose from distutils.version import LooseVersion from numpy import nan @@ -188,5 +187,6 @@ def test_rank_object_bug(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index bbcd856250c51..867bdf2a7c601 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -4,8 +4,6 @@ from datetime import datetime, timedelta import re -import nose - from numpy import nan as NA import numpy as np from numpy.random import randint @@ -2681,5 +2679,6 @@ def test_method_on_bytes(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py index 98b3b474f785d..cd2590eb93403 100644 --- a/pandas/tests/test_take.py +++ 
b/pandas/tests/test_take.py @@ -2,7 +2,6 @@ import re from datetime import datetime -import nose import numpy as np from pandas.compat import long import pandas.core.algorithms as algos @@ -451,5 +450,6 @@ def test_2d_datetime64(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py index 7a217ed9dbd86..1ac8a8def1db6 100644 --- a/pandas/tests/test_testing.py +++ b/pandas/tests/test_testing.py @@ -806,5 +806,6 @@ def f(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/test_util.py b/pandas/tests/test_util.py index cb12048676d26..974b7bf281416 100644 --- a/pandas/tests/test_util.py +++ b/pandas/tests/test_util.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -import nose - from collections import OrderedDict import sys import unittest @@ -388,5 +386,6 @@ def test_numpy_errstate_is_default(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/types/test_cast.py b/pandas/tests/types/test_cast.py index 56a14a51105ca..6743a27bc92e8 100644 --- a/pandas/tests/types/test_cast.py +++ b/pandas/tests/types/test_cast.py @@ -4,9 +4,6 @@ These test the private routines in types/cast.py """ - - -import nose from datetime import datetime import numpy as np @@ -281,5 +278,6 @@ def test_period_dtype(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/types/test_common.py b/pandas/tests/types/test_common.py index 
4d6f50862c562..83dcbb86eedcd 100644 --- a/pandas/tests/types/test_common.py +++ b/pandas/tests/types/test_common.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -import nose import numpy as np from pandas.types.dtypes import DatetimeTZDtype, PeriodDtype, CategoricalDtype @@ -58,5 +57,6 @@ def test_dtype_equal(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/types/test_concat.py b/pandas/tests/types/test_concat.py index 6403dcb5a5350..11db1081945e5 100644 --- a/pandas/tests/types/test_concat.py +++ b/pandas/tests/types/test_concat.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -import nose import pandas as pd import pandas.types.concat as _concat import pandas.util.testing as tm @@ -82,5 +81,6 @@ def test_get_dtype_kinds_period(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/types/test_dtypes.py b/pandas/tests/types/test_dtypes.py index a2b0a9ebfa6cc..f6782a703ed71 100644 --- a/pandas/tests/types/test_dtypes.py +++ b/pandas/tests/types/test_dtypes.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- from itertools import product -import nose import numpy as np import pandas as pd from pandas import Series, Categorical, date_range @@ -351,5 +350,6 @@ def test_not_string(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/types/test_generic.py b/pandas/tests/types/test_generic.py index 89913de6f6069..43b75ec8c1f75 100644 --- a/pandas/tests/types/test_generic.py +++ b/pandas/tests/types/test_generic.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -import nose import numpy as np import pandas as pd import 
pandas.util.testing as tm @@ -43,5 +42,6 @@ def test_abc_types(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/types/test_inference.py b/pandas/tests/types/test_inference.py index 8180757d9e706..325744a6860af 100644 --- a/pandas/tests/types/test_inference.py +++ b/pandas/tests/types/test_inference.py @@ -6,7 +6,6 @@ """ -import nose import collections import re from datetime import datetime, date, timedelta, time @@ -935,5 +934,6 @@ def test_ensure_categorical(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/types/test_io.py b/pandas/tests/types/test_io.py index 545edf8f1386c..789188ffb10f3 100644 --- a/pandas/tests/types/test_io.py +++ b/pandas/tests/types/test_io.py @@ -110,7 +110,6 @@ def test_convert_downcast_int64(self): if __name__ == '__main__': - import nose + import pytest - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tests/types/test_missing.py b/pandas/tests/types/test_missing.py index fa2bd535bb8d5..04368cfe57fe5 100644 --- a/pandas/tests/types/test_missing.py +++ b/pandas/tests/types/test_missing.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -import nose import numpy as np from datetime import datetime from pandas.util import testing as tm @@ -307,5 +306,6 @@ def test_na_value_for_dtype(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tools/tests/test_concat.py b/pandas/tools/tests/test_concat.py index 172eee99b7c6b..97569b5a273ee 100644 --- 
a/pandas/tools/tests/test_concat.py +++ b/pandas/tools/tests/test_concat.py @@ -1,5 +1,3 @@ -import nose - import numpy as np from numpy.random import randn @@ -2174,5 +2172,6 @@ def test_concat_multiindex_dfs_with_deepcopy(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tools/tests/test_join.py b/pandas/tools/tests/test_join.py index f33d5f16cd439..c3653ed8cc3e8 100644 --- a/pandas/tools/tests/test_join.py +++ b/pandas/tools/tests/test_join.py @@ -1,7 +1,5 @@ # pylint: disable=E1103 -import nose - from numpy.random import randn import numpy as np @@ -800,5 +798,6 @@ def _join_by_hand(a, b, how='left'): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py index f078959608f91..db3af08ec709f 100644 --- a/pandas/tools/tests/test_merge.py +++ b/pandas/tools/tests/test_merge.py @@ -1,7 +1,5 @@ # pylint: disable=E1103 -import nose - from datetime import datetime from numpy.random import randn from numpy import nan @@ -1373,5 +1371,6 @@ def f(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tools/tests/test_merge_asof.py b/pandas/tools/tests/test_merge_asof.py index bbbf1a3bdfff9..8dcc614e7a9d3 100644 --- a/pandas/tools/tests/test_merge_asof.py +++ b/pandas/tools/tests/test_merge_asof.py @@ -1,4 +1,3 @@ -import nose import os import pytz @@ -757,5 +756,6 @@ def test_on_float_by_int(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', 
'-x', '--pdb']) diff --git a/pandas/tools/tests/test_merge_ordered.py b/pandas/tools/tests/test_merge_ordered.py index 0511a0ca6d1cf..ca6f7cf4ff15b 100644 --- a/pandas/tools/tests/test_merge_ordered.py +++ b/pandas/tools/tests/test_merge_ordered.py @@ -1,5 +1,3 @@ -import nose - import pandas as pd from pandas import DataFrame, merge_ordered from pandas.util import testing as tm @@ -94,5 +92,6 @@ def test_empty_sequence_concat(self): pd.concat([pd.DataFrame(), None]) if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py index e63cfcc8c0590..10b8eb721a789 100644 --- a/pandas/tools/tests/test_pivot.py +++ b/pandas/tools/tests/test_pivot.py @@ -1323,6 +1323,6 @@ def test_crosstab_with_numpy_size(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tools/tests/test_tile.py b/pandas/tools/tests/test_tile.py index 5c7cee862ccd3..87242263c4471 100644 --- a/pandas/tools/tests/test_tile.py +++ b/pandas/tools/tests/test_tile.py @@ -1,5 +1,4 @@ import os -import nose import numpy as np from pandas.compat import zip @@ -354,5 +353,6 @@ def curpath(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tools/tests/test_util.py b/pandas/tools/tests/test_util.py index 8a8960a057926..05bb1d3718b35 100644 --- a/pandas/tools/tests/test_util.py +++ b/pandas/tools/tests/test_util.py @@ -12,7 +12,6 @@ import pandas.util.testing as tm from pandas.tools.util import cartesian_product, to_numeric -CURRENT_LOCALE = locale.getlocale() LOCALE_OVERRIDE = 
os.environ.get('LOCALE_OVERRIDE', None) @@ -89,6 +88,8 @@ def test_get_locales_prefix(self): assert len(tm.get_locales(prefix=first_locale[:2])) > 0 def test_set_locale(self): + CURRENT_LOCALE = locale.getlocale() + if len(self.locales) == 1: raise nose.SkipTest("Only a single locale found, no point in " "trying to test setting another locale") @@ -481,5 +482,6 @@ def test_downcast_limits(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py index bca50237081e1..1da0fba8eaad4 100644 --- a/pandas/tseries/tests/test_base.py +++ b/pandas/tseries/tests/test_base.py @@ -2758,7 +2758,6 @@ def test_equals(self): if __name__ == '__main__': - import nose + import pytest - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_converter.py b/pandas/tseries/tests/test_converter.py index 7e4ed288e31c1..d93ec5db7a02d 100644 --- a/pandas/tseries/tests/test_converter.py +++ b/pandas/tseries/tests/test_converter.py @@ -183,6 +183,6 @@ def test_integer_passthrough(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py index 87f9f55e0189c..4fe8df5b0d42b 100644 --- a/pandas/tseries/tests/test_daterange.py +++ b/pandas/tseries/tests/test_daterange.py @@ -1,6 +1,5 @@ from datetime import datetime from pandas.compat import range -import nose import numpy as np from pandas.core.index import Index @@ -820,5 +819,6 @@ def test_cdaterange_weekmask_and_holidays(self): if __name__ == '__main__': - 
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_holiday.py b/pandas/tseries/tests/test_holiday.py index 62446e8e637c6..149db5787b69c 100644 --- a/pandas/tseries/tests/test_holiday.py +++ b/pandas/tseries/tests/test_holiday.py @@ -15,7 +15,6 @@ USLaborDay, USColumbusDay, USMartinLutherKingJr, USPresidentsDay) from pytz import utc -import nose class TestCalendar(tm.TestCase): @@ -388,5 +387,6 @@ def test_both_offset_observance_raises(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index 768e9212e6c42..59565aa46ea56 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -4959,5 +4959,6 @@ def test_all_offset_classes(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index ad4f669fceb42..77dc5bfee77e0 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -4970,6 +4970,6 @@ def test_get_period_field_array_raises_on_out_of_range(self): if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 26c311b4a72f8..7331d0f58ad38 100755 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -3,7 +3,6 @@ from datetime import datetime, timedelta 
from functools import partial -import nose import numpy as np import pandas as pd @@ -3126,5 +3125,6 @@ def test_aggregate_with_nat(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index 1d07b4ab39a99..9898ca0b3e6b5 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -2,7 +2,6 @@ from __future__ import division from datetime import timedelta, time -import nose from distutils.version import LooseVersion import numpy as np @@ -2010,5 +2009,6 @@ def test_add_overflow(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 1834c56e59bb9..ce3ff0b35e89d 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -5658,5 +5658,6 @@ def test_day_not_in_month_ignore(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_timeseries_legacy.py b/pandas/tseries/tests/test_timeseries_legacy.py index d8c01c53fb2e5..878020d7362d2 100644 --- a/pandas/tseries/tests/test_timeseries_legacy.py +++ b/pandas/tseries/tests/test_timeseries_legacy.py @@ -222,5 +222,6 @@ def test_rule_aliases(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py 
index db8cda5c76479..74ba15ba4fb3f 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -1,6 +1,5 @@ # pylint: disable-msg=E1101,W0612 from datetime import datetime, timedelta, tzinfo, date -import nose import numpy as np import pytz @@ -1687,5 +1686,6 @@ def test_nat(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index 58ec1561b2535..f8dd0b899d41e 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -1,4 +1,3 @@ -import nose from distutils.version import LooseVersion import numpy as np @@ -1542,5 +1541,6 @@ def test_resolution(self): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/tseries/tests/test_util.py b/pandas/tseries/tests/test_util.py index 96da32a4a845c..56ebcbfea0572 100644 --- a/pandas/tseries/tests/test_util.py +++ b/pandas/tseries/tests/test_util.py @@ -1,5 +1,4 @@ from pandas.compat import range -import nose import numpy as np @@ -128,5 +127,6 @@ def test_normalize_date(): if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) + import pytest + + pytest.main([__file__, '-vvs', '-x', '--pdb']) diff --git a/pandas/util/nosetester.py b/pandas/util/nosetester.py deleted file mode 100644 index 1bdaaff99fd50..0000000000000 --- a/pandas/util/nosetester.py +++ /dev/null @@ -1,261 +0,0 @@ -""" -Nose test running. - -This module implements ``test()`` function for pandas modules. 
- -""" -from __future__ import division, absolute_import, print_function - -import os -import sys -import warnings -from pandas.compat import string_types -from numpy.testing import nosetester - - -def get_package_name(filepath): - """ - Given a path where a package is installed, determine its name. - - Parameters - ---------- - filepath : str - Path to a file. If the determination fails, "pandas" is returned. - - Examples - -------- - >>> pandas.util.nosetester.get_package_name('nonsense') - 'pandas' - - """ - - pkg_name = [] - while 'site-packages' in filepath or 'dist-packages' in filepath: - filepath, p2 = os.path.split(filepath) - if p2 in ('site-packages', 'dist-packages'): - break - pkg_name.append(p2) - - # if package name determination failed, just default to pandas - if not pkg_name: - return "pandas" - - # otherwise, reverse to get correct order and return - pkg_name.reverse() - - # don't include the outer egg directory - if pkg_name[0].endswith('.egg'): - pkg_name.pop(0) - - return '.'.join(pkg_name) - -import_nose = nosetester.import_nose -run_module_suite = nosetester.run_module_suite - - -class NoseTester(nosetester.NoseTester): - """ - Nose test runner. - - This class is made available as pandas.util.nosetester.NoseTester, and - a test function is typically added to a package's __init__.py like so:: - - from numpy.testing import Tester - test = Tester().test - - Calling this test function finds and runs all tests associated with the - package and all its sub-packages. - - Attributes - ---------- - package_path : str - Full path to the package to test. - package_name : str - Name of the package to test. - - Parameters - ---------- - package : module, str or None, optional - The package to test. If a string, this should be the full path to - the package. If None (default), `package` is set to the module from - which `NoseTester` is initialized. 
- raise_warnings : None, str or sequence of warnings, optional - This specifies which warnings to configure as 'raise' instead - of 'warn' during the test execution. Valid strings are: - - - "develop" : equals ``(DeprecationWarning, RuntimeWarning)`` - - "release" : equals ``()``, don't raise on any warnings. - - See Notes for more details. - - Notes - ----- - The default for `raise_warnings` is - ``(DeprecationWarning, RuntimeWarning)`` for development versions of - pandas, and ``()`` for released versions. The purpose of this switching - behavior is to catch as many warnings as possible during development, but - not give problems for packaging of released versions. - - """ - excludes = [] - - def _show_system_info(self): - nose = import_nose() - - import pandas - print("pandas version %s" % pandas.__version__) - import numpy - print("numpy version %s" % numpy.__version__) - pddir = os.path.dirname(pandas.__file__) - print("pandas is installed in %s" % pddir) - - pyversion = sys.version.replace('\n', '') - print("Python version %s" % pyversion) - print("nose version %d.%d.%d" % nose.__versioninfo__) - - def _get_custom_doctester(self): - """ Return instantiated plugin for doctests - - Allows subclassing of this class to override doctester - - A return value of None means use the nose builtin doctest plugin - """ - return None - - def _test_argv(self, label, verbose, extra_argv): - """ - Generate argv for nosetest command - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - see ``test`` docstring - verbose : int, optional - Verbosity value for test outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. 
- - Returns - ------- - argv : list - command line arguments that will be passed to nose - """ - - argv = [__file__, self.package_path] - if label and label != 'full': - if not isinstance(label, string_types): - raise TypeError('Selection label should be a string') - if label == 'fast': - label = 'not slow and not network and not disabled' - argv += ['-A', label] - argv += ['--verbosity', str(verbose)] - - # When installing with setuptools, and also in some other cases, the - # test_*.py files end up marked +x executable. Nose, by default, does - # not run files marked with +x as they might be scripts. However, in - # our case nose only looks for test_*.py files under the package - # directory, which should be safe. - argv += ['--exe'] - - if extra_argv: - argv += extra_argv - return argv - - def test(self, label='fast', verbose=1, extra_argv=None, - doctests=False, coverage=False, raise_warnings=None): - """ - Run tests for module using nose. - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - Identifies the tests to run. This can be a string to pass to - the nosetests executable with the '-A' option, or one of several - special values. Special values are: - - * 'fast' - the default - which corresponds to the ``nosetests -A`` - option of 'not slow'. - * 'full' - fast (as above) and slow tests as in the - 'no -A' option to nosetests - this is the same as ''. - * None or '' - run all tests. - * attribute_identifier - string passed directly to nosetests - as '-A'. - - verbose : int, optional - Verbosity value for test outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. - doctests : bool, optional - If True, run doctests in module. Default is False. - coverage : bool, optional - If True, report coverage of NumPy code. Default is False. - (This requires the `coverage module - <http://nedbatchelder.com/code/modules/coverage.html>`_). 
- raise_warnings : str or sequence of warnings, optional - This specifies which warnings to configure as 'raise' instead - of 'warn' during the test execution. Valid strings are: - - - 'develop' : equals ``(DeprecationWarning, RuntimeWarning)`` - - 'release' : equals ``()``, don't raise on any warnings. - - Returns - ------- - result : object - Returns the result of running the tests as a - ``nose.result.TextTestResult`` object. - - """ - - # cap verbosity at 3 because nose becomes *very* verbose beyond that - verbose = min(verbose, 3) - - if doctests: - print("Running unit tests and doctests for %s" % self.package_name) - else: - print("Running unit tests for %s" % self.package_name) - - self._show_system_info() - - # reset doctest state on every run - import doctest - doctest.master = None - - if raise_warnings is None: - - # default based on if we are released - from pandas import __version__ - from distutils.version import StrictVersion - try: - StrictVersion(__version__) - raise_warnings = 'release' - except ValueError: - raise_warnings = 'develop' - - _warn_opts = dict(develop=(DeprecationWarning, RuntimeWarning), - release=()) - if isinstance(raise_warnings, string_types): - raise_warnings = _warn_opts[raise_warnings] - - with warnings.catch_warnings(): - - if len(raise_warnings): - - # Reset the warning filters to the default state, - # so that running the tests is more repeatable. - warnings.resetwarnings() - # Set all warnings to 'warn', this is because the default - # 'once' has the bad property of possibly shadowing later - # warnings. - warnings.filterwarnings('always') - # Force the requested warnings to raise - for warningtype in raise_warnings: - warnings.filterwarnings('error', category=warningtype) - # Filter out annoying import messages. 
- warnings.filterwarnings("ignore", category=FutureWarning) - - from numpy.testing.noseclasses import NumpyTestProgram - argv, plugins = self.prepare_test_args( - label, verbose, extra_argv, doctests, coverage) - t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) - - return t.result diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 05517bf6cf53a..0b38b4124075a 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -19,7 +19,8 @@ from distutils.version import LooseVersion from numpy.random import randn, rand -from numpy.testing.decorators import slow # noqa +# from numpy.testing.decorators import slow # noqa +import pytest import numpy as np import pandas as pd @@ -2538,9 +2539,8 @@ def assert_produces_warning(expected_warning=Warning, filter_level="always", % extra_warnings) -def disabled(t): - t.disabled = True - return t +disabled = pytest.mark.disabled +slow = pytest.mark.slow class RNGContext(object): diff --git a/setup.cfg b/setup.cfg index f69e256b80869..1180e50718e33 100644 --- a/setup.cfg +++ b/setup.cfg @@ -19,3 +19,8 @@ based_on_style = pep8 split_before_named_assigns = false split_penalty_after_opening_bracket = 1000000 split_penalty_logical_operator = 30 + +[tool:pytest] +# TODO: Change all yield-based (nose-style) fixutures to pytest fixtures +# Silencing the warning until then +addopts = --disable-pytest-warnings diff --git a/test.bat b/test.bat index 16aa6c9105ec3..7f9244abb2bc8 100644 --- a/test.bat +++ b/test.bat @@ -1,3 +1,4 @@ :: test on windows -nosetests --exe -A "not slow and not network and not disabled" pandas %* +:: nosetests --exe -A "not slow and not network and not disabled" pandas %* +pytest pandas diff --git a/test.sh b/test.sh index 4a9ffd7be98b1..23c7ff52d2ce9 100755 --- a/test.sh +++ b/test.sh @@ -1,11 +1,4 @@ #!/bin/sh command -v coverage >/dev/null && coverage erase command -v python-coverage >/dev/null && python-coverage erase -# nosetests pandas/tests/test_index.py --with-coverage 
--cover-package=pandas.core --pdb-failure --pdb -#nosetests -w pandas --with-coverage --cover-package=pandas --pdb-failure --pdb #--cover-inclusive -#nosetests -A "not slow" -w pandas/tseries --with-coverage --cover-package=pandas.tseries $* #--cover-inclusive -nosetests -w pandas --with-coverage --cover-package=pandas $* -# nosetests -w pandas/io --with-coverage --cover-package=pandas.io --pdb-failure --pdb -# nosetests -w pandas/core --with-coverage --cover-package=pandas.core --pdb-failure --pdb -# nosetests -w pandas/stats --with-coverage --cover-package=pandas.stats -# coverage run runtests.py +pytest pandas --cov=pandas diff --git a/test_fast.sh b/test_fast.sh index b390705f901ad..0b394cffa3d74 100755 --- a/test_fast.sh +++ b/test_fast.sh @@ -1 +1,2 @@ -nosetests -A "not slow and not network" pandas --with-id $* +# nosetests -A "not slow and not network" pandas --with-id $* +pytest pandas --skip-slow diff --git a/test_multi.sh b/test_multi.sh index 5d77945c66a26..7bc9f2c8f0054 100755 --- a/test_multi.sh +++ b/test_multi.sh @@ -1 +1,2 @@ -nosetests -A "not slow and not network" pandas --processes=4 $* +# nosetests -A "not slow and not network" pandas --processes=4 $* +pytest pandas diff --git a/test_rebuild.sh b/test_rebuild.sh index d3710c5ff67d3..65aa1098811a1 100755 --- a/test_rebuild.sh +++ b/test_rebuild.sh @@ -3,10 +3,4 @@ python setup.py clean python setup.py build_ext --inplace coverage erase -# nosetests pandas/tests/test_index.py --with-coverage --cover-package=pandas.core --pdb-failure --pdb -#nosetests -w pandas --with-coverage --cover-package=pandas --pdb-failure --pdb #--cover-inclusive -nosetests -w pandas --with-coverage --cover-package=pandas $* #--cover-inclusive -# nosetests -w pandas/io --with-coverage --cover-package=pandas.io --pdb-failure --pdb -# nosetests -w pandas/core --with-coverage --cover-package=pandas.core --pdb-failure --pdb -# nosetests -w pandas/stats --with-coverage --cover-package=pandas.stats -# coverage run runtests.py 
+pytest pandas --cov=pandas diff --git a/tox.ini b/tox.ini index 5d6c8975307b6..85c5d90fde7fb 100644 --- a/tox.ini +++ b/tox.ini @@ -10,6 +10,7 @@ envlist = py27, py34, py35 deps = cython nose + pytest pytz>=2011k python-dateutil beautifulsoup4 @@ -26,7 +27,7 @@ changedir = {envdir} commands = # TODO: --exe because of GH #761 - {envbindir}/nosetests --exe pandas {posargs:-A "not network and not disabled"} + {envbindir}/pytest pandas {posargs:-A "not network and not disabled"} # cleanup the temp. build dir created by the tox build # /bin/rm -rf {toxinidir}/build @@ -63,18 +64,18 @@ usedevelop = True deps = {[testenv]deps} openpyxl<2.0.0 -commands = {envbindir}/nosetests {toxinidir}/pandas/io/tests/test_excel.py +commands = {envbindir}/pytest {toxinidir}/pandas/io/tests/test_excel.py [testenv:openpyxl20] usedevelop = True deps = {[testenv]deps} openpyxl<2.2.0 -commands = {envbindir}/nosetests {posargs} {toxinidir}/pandas/io/tests/test_excel.py +commands = {envbindir}/pytest {posargs} {toxinidir}/pandas/io/tests/test_excel.py [testenv:openpyxl22] usedevelop = True deps = {[testenv]deps} openpyxl>=2.2.0 -commands = {envbindir}/nosetests {posargs} {toxinidir}/pandas/io/tests/test_excel.py +commands = {envbindir}/pytest {posargs} {toxinidir}/pandas/io/tests/test_excel.py
Wondering if there's some strange caching on my other commit because [this](https://travis-ci.org/pandas-dev/pandas/jobs/189014114) failure doesn't make sense. Even nose is failing on those tests :/
https://api.github.com/repos/pandas-dev/pandas/pulls/15065
2017-01-05T01:26:00Z
2017-01-05T14:02:39Z
null
2017-04-05T02:07:16Z
pathlib support in pytables
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index e474aeab1f6ca..7d66e1b4e1364 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -555,6 +555,8 @@ def open(self, mode='a', **kwargs): """ tables = _tables() + path_or_buf = _stringify_path(path_or_buf) + if self._mode != mode: # if we are changing a write mode to read, ok
- [ ] closes #14705 - [ ] tests added / passed - [ ] passes ``git diff upstream/master | flake8 --diff`` - [ ] whatsnew entry As we talked about on gitter, I added the line (https://github.com/pandas-dev/pandas/blob/master/pandas/io/pytables.py#L266) to the HDFStore.open function. Let me know if I misunderstood or should make further edits, thank you!
https://api.github.com/repos/pandas-dev/pandas/pulls/15064
2017-01-05T00:46:41Z
2017-03-28T18:03:08Z
null
2017-03-29T09:32:42Z
add the 'name' attribute to dataframes that go through apply_frame_axis0
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 03ff62568b405..4e66c96b82761 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -108,16 +108,34 @@ def setup(self): self.N = 10000 self.labels = np.random.randint(0, 2000, size=self.N) self.labels2 = np.random.randint(0, 3, size=self.N) - self.df = DataFrame({'key': self.labels, 'key2': self.labels2, 'value1': randn(self.N), 'value2': (['foo', 'bar', 'baz', 'qux'] * (self.N / 4)), }) - - def f(self, g): + self.df = DataFrame({ + 'key': self.labels, + 'key2': self.labels2, + 'value1': randn(self.N), + 'value2': (['foo', 'bar', 'baz', 'qux'] * (self.N / 4)), + }) + + @staticmethod + def scalar_function(g): return 1 - def time_groupby_frame_apply(self): - self.df.groupby(['key', 'key2']).apply(self.f) + def time_groupby_frame_apply_scalar_function(self): + self.df.groupby(['key', 'key2']).apply(self.scalar_function) + + def time_groupby_frame_apply_scalar_function_overhead(self): + self.df.groupby('key').apply(self.scalar_function) + + @staticmethod + def df_copy_function(g): + # ensure that the group name is available (see GH #15062) + g.name + return g.copy() + + def time_groupby_frame_df_copy_function(self): + self.df.groupby(['key', 'key2']).apply(self.df_copy_function) - def time_groupby_frame_apply_overhead(self): - self.df.groupby('key').apply(self.f) + def time_groupby_frame_apply_df_copy_overhead(self): + self.df.groupby('key').apply(self.df_copy_function) #---------------------------------------------------------------------- diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 7e4fa44ea8ded..360fcd8a37908 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -370,6 +370,10 @@ Performance Improvements - When reading buffer object in ``read_sas()`` method without specified format, filepath string is inferred rather than buffer object. 
+- Improve performance of ``pd.core.groupby.GroupBy.apply`` when the applied + function used the ``.name`` attribute of the group DataFrame (:issue:`15062`). + + .. _whatsnew_0200.bug_fixes: diff --git a/pandas/src/reduce.pyx b/pandas/src/reduce.pyx index 1cd3e53494a72..2bba07256305a 100644 --- a/pandas/src/reduce.pyx +++ b/pandas/src/reduce.pyx @@ -497,7 +497,7 @@ def apply_frame_axis0(object frame, object f, object names, # Need to infer if our low-level mucking is going to cause a segfault if n > 0: chunk = frame.iloc[starts[0]:ends[0]] - shape_before = chunk.shape + object.__setattr__(chunk, 'name', names[0]) try: result = f(chunk) if result is chunk: diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 7140eb5a6fd12..483c3cb330c31 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -6022,6 +6022,21 @@ def test_cummin_cummax(self): result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame() tm.assert_frame_equal(expected, result) + def test_group_name_available_in_inference_pass(self): + df = pd.DataFrame({'a': [0, 0, 1, 1, 2, 2], 'b': np.arange(6)}) + + names = [] + + def f(group): + names.append(group.name) + return group.copy() + + df.groupby('a', sort=False, group_keys=False).apply(f) + # we expect 2 zeros because we call ``f`` once to see if a faster route + # can be used. + expected_names = [0, 0, 1, 2] + tm.assert_equal(names, expected_names) + def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all()
- [ ] closes #xxxx - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [ ] whatsnew entry Previously, if you did `group.name` in the applied function, it would fail and fall back to the slower path because the attribute did not exist. `shape_before` was unused.
https://api.github.com/repos/pandas-dev/pandas/pulls/15062
2017-01-05T00:22:46Z
2017-03-25T16:15:15Z
null
2017-03-25T16:15:40Z
ENH: Accept callable for skiprows in read_csv
diff --git a/doc/source/io.rst b/doc/source/io.rst index dae97f7bc7f34..9f5e6f2331bc5 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -187,6 +187,16 @@ skipinitialspace : boolean, default ``False`` skiprows : list-like or integer, default ``None`` Line numbers to skip (0-indexed) or number of lines to skip (int) at the start of the file. + + If callable, the callable function will be evaluated against the row + indices, returning True if the row should be skipped and False otherwise: + + .. ipython:: python + + data = 'col1,col2,col3\na,b,1\na,b,2\nc,d,3' + pd.read_csv(StringIO(data)) + pd.read_csv(StringIO(data), skiprows=lambda x: x % 2 != 0) + skipfooter : int, default ``0`` Number of lines at bottom of file to skip (unsupported with engine='c'). skip_footer : int, default ``0`` diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 70fddea3fe1a9..eb3c6c40682ed 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -110,6 +110,7 @@ Other enhancements - ``pd.qcut`` has gained the ``duplicates='raise'|'drop'`` option to control whether to raise on duplicated edges (:issue:`7751`) - ``Series`` provides a ``to_excel`` method to output Excel files (:issue:`8825`) - The ``usecols`` argument in ``pd.read_csv`` now accepts a callable function as a value (:issue:`14154`) +- The ``skiprows`` argument in ``pd.read_csv`` now accepts a callable function as a value (:issue:`10882`) - ``pd.DataFrame.plot`` now prints a title above each subplot if ``suplots=True`` and ``title`` is a list of strings (:issue:`14753`) - ``pd.Series.interpolate`` now supports timedelta as an index type with ``method='time'`` (:issue:`6424`) - ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. 
(:issue:`14583`) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index f2c3113fc2cdd..fdf26fdef6b25 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -132,9 +132,13 @@ Values to consider as False skipinitialspace : boolean, default False Skip spaces after delimiter. -skiprows : list-like or integer, default None +skiprows : list-like or integer or callable, default None Line numbers to skip (0-indexed) or number of lines to skip (int) - at the start of the file + at the start of the file. + + If callable, the callable function will be evaluated against the row + indices, returning True if the row should be skipped and False otherwise. + An example of a valid callable argument would be ``lambda x: x in [0, 2]``. skipfooter : int, default 0 Number of lines at bottom of file to skip (Unsupported with engine='c') skip_footer : int, default 0 @@ -930,7 +934,10 @@ def _clean_options(self, options, engine): if engine != 'c': if is_integer(skiprows): skiprows = lrange(skiprows) - skiprows = set() if skiprows is None else set(skiprows) + if skiprows is None: + skiprows = set() + elif not callable(skiprows): + skiprows = set(skiprows) # put stuff back result['names'] = names @@ -1851,6 +1858,11 @@ def __init__(self, f, **kwds): self.memory_map = kwds['memory_map'] self.skiprows = kwds['skiprows'] + if callable(self.skiprows): + self.skipfunc = self.skiprows + else: + self.skipfunc = lambda x: x in self.skiprows + self.skipfooter = kwds['skipfooter'] self.delimiter = kwds['delimiter'] @@ -2006,7 +2018,7 @@ class MyDialect(csv.Dialect): # attempt to sniff the delimiter if sniff_sep: line = f.readline() - while self.pos in self.skiprows: + while self.skipfunc(self.pos): self.pos += 1 line = f.readline() @@ -2414,7 +2426,7 @@ def _empty(self, line): def _next_line(self): if isinstance(self.data, list): - while self.pos in self.skiprows: + while self.skipfunc(self.pos): self.pos += 1 while True: @@ -2433,7 +2445,7 @@ def _next_line(self): except IndexError: 
raise StopIteration else: - while self.pos in self.skiprows: + while self.skipfunc(self.pos): self.pos += 1 next(self.data) @@ -2685,7 +2697,7 @@ def _get_lines(self, rows=None): # Check for stop rows. n.b.: self.skiprows is a set. if self.skiprows: new_rows = [row for i, row in enumerate(new_rows) - if i + self.pos not in self.skiprows] + if not self.skipfunc(i + self.pos)] lines.extend(new_rows) self.pos = new_pos @@ -2713,7 +2725,7 @@ def _get_lines(self, rows=None): except StopIteration: if self.skiprows: new_rows = [row for i, row in enumerate(new_rows) - if self.pos + i not in self.skiprows] + if not self.skipfunc(i + self.pos)] lines.extend(new_rows) if len(lines) == 0: raise diff --git a/pandas/io/tests/parser/skiprows.py b/pandas/io/tests/parser/skiprows.py index 9f01adb6fabcb..c53e6a1579267 100644 --- a/pandas/io/tests/parser/skiprows.py +++ b/pandas/io/tests/parser/skiprows.py @@ -12,6 +12,7 @@ import pandas.util.testing as tm from pandas import DataFrame +from pandas.io.common import EmptyDataError from pandas.compat import StringIO, range, lrange @@ -198,3 +199,27 @@ def test_skiprows_infield_quote(self): df = self.read_csv(StringIO(data), skiprows=2) tm.assert_frame_equal(df, expected) + + def test_skiprows_callable(self): + data = 'a\n1\n2\n3\n4\n5' + + skiprows = lambda x: x % 2 == 0 + expected = DataFrame({'1': [3, 5]}) + df = self.read_csv(StringIO(data), skiprows=skiprows) + tm.assert_frame_equal(df, expected) + + expected = DataFrame({'foo': [3, 5]}) + df = self.read_csv(StringIO(data), skiprows=skiprows, + header=0, names=['foo']) + tm.assert_frame_equal(df, expected) + + skiprows = lambda x: True + msg = "No columns to parse from file" + with tm.assertRaisesRegexp(EmptyDataError, msg): + self.read_csv(StringIO(data), skiprows=skiprows) + + # This is a bad callable and should raise. 
+ msg = "by zero" + skiprows = lambda x: 1 / 0 + with tm.assertRaisesRegexp(ZeroDivisionError, msg): + self.read_csv(StringIO(data), skiprows=skiprows) diff --git a/pandas/parser.pyx b/pandas/parser.pyx index 7b31f7fe27c1e..bd793c98eef5b 100644 --- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -178,6 +178,7 @@ cdef extern from "parser/tokenizer.h": int header_end # header row end void *skipset + PyObject *skipfunc int64_t skip_first_N_rows int skipfooter double (*converter)(const char *, char **, char, char, char, int) nogil @@ -606,9 +607,11 @@ cdef class TextReader: cdef _make_skiprow_set(self): if isinstance(self.skiprows, (int, np.integer)): parser_set_skipfirstnrows(self.parser, self.skiprows) - else: + elif not callable(self.skiprows): for i in self.skiprows: parser_add_skiprow(self.parser, i) + else: + self.parser.skipfunc = <PyObject *> self.skiprows cdef _setup_parser_source(self, source): cdef: @@ -2115,18 +2118,33 @@ cdef kh_float64_t* kset_float64_from_list(values) except NULL: cdef raise_parser_error(object base, parser_t *parser): cdef: object old_exc + object exc_type PyObject *type PyObject *value PyObject *traceback if PyErr_Occurred(): - PyErr_Fetch(&type, &value, &traceback); - Py_XDECREF(type) + PyErr_Fetch(&type, &value, &traceback) Py_XDECREF(traceback) + if value != NULL: old_exc = <object> value Py_XDECREF(value) - raise old_exc + + # PyErr_Fetch only returned the error message in *value, + # so the Exception class must be extracted from *type. + if isinstance(old_exc, compat.string_types): + if type != NULL: + exc_type = <object> type + else: + exc_type = ParserError + + Py_XDECREF(type) + raise exc_type(old_exc) + else: + Py_XDECREF(type) + raise old_exc + message = '%s. 
C error: ' % base if parser.error_msg != NULL: if PY3: diff --git a/pandas/src/parser/tokenizer.c b/pandas/src/parser/tokenizer.c index bc729cd3e7453..87e17fe5fb751 100644 --- a/pandas/src/parser/tokenizer.c +++ b/pandas/src/parser/tokenizer.c @@ -124,6 +124,7 @@ void parser_set_default_options(parser_t *self) { self->thousands = '\0'; self->skipset = NULL; + self->skipfunc = NULL; self->skip_first_N_rows = -1; self->skip_footer = 0; } @@ -679,7 +680,27 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes) { } int skip_this_line(parser_t *self, int64_t rownum) { - if (self->skipset != NULL) { + int should_skip; + PyObject *result; + PyGILState_STATE state; + + if (self->skipfunc != NULL) { + state = PyGILState_Ensure(); + result = PyObject_CallFunction(self->skipfunc, "i", rownum); + + // Error occurred. It will be processed + // and caught at the Cython level. + if (result == NULL) { + should_skip = -1; + } else { + should_skip = PyObject_IsTrue(result); + } + + Py_XDECREF(result); + PyGILState_Release(state); + + return should_skip; + } else if (self->skipset != NULL) { return (kh_get_int64((kh_int64_t *)self->skipset, self->file_lines) != ((kh_int64_t *)self->skipset)->n_buckets); } else { @@ -689,6 +710,7 @@ int skip_this_line(parser_t *self, int64_t rownum) { int tokenize_bytes(parser_t *self, size_t line_limit, int start_lines) { int i, slen; + int should_skip; long maxstreamsize; char c; char *stream; @@ -818,7 +840,11 @@ int tokenize_bytes(parser_t *self, size_t line_limit, int start_lines) { case START_RECORD: // start of record - if (skip_this_line(self, self->file_lines)) { + should_skip = skip_this_line(self, self->file_lines); + + if (should_skip == -1) { + goto parsingerror; + } else if (should_skip) { if (IS_QUOTE(c)) { self->state = IN_QUOTED_FIELD_IN_SKIP_LINE; } else { diff --git a/pandas/src/parser/tokenizer.h b/pandas/src/parser/tokenizer.h index d31bf4b688c58..e7271cabb0752 100644 --- a/pandas/src/parser/tokenizer.h +++ 
b/pandas/src/parser/tokenizer.h @@ -198,6 +198,7 @@ typedef struct parser_t { int header_end; // header row end void *skipset; + PyObject *skipfunc; int64_t skip_first_N_rows; int skip_footer; double (*converter)(const char *, char **, char, char, char, int);
Title is self-explanatory. xref #10882.
https://api.github.com/repos/pandas-dev/pandas/pulls/15059
2017-01-04T18:36:32Z
2017-01-14T17:05:52Z
null
2017-01-14T19:47:32Z
DOC: Small corrections to describe docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 5f8be9632cbe1..1680c061ad7d3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5287,7 +5287,6 @@ def describe(self, percentiles=None, include=None, exclude=None): -------- Describing a numeric ``Series``. - >>> import pandas as pd >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 @@ -5311,7 +5310,6 @@ def describe(self, percentiles=None, include=None, exclude=None): Describing a timestamp ``Series``. - >>> import numpy as np >>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), @@ -5329,10 +5327,8 @@ def describe(self, percentiles=None, include=None, exclude=None): Describing a ``DataFrame``. By default only numeric fields are returned. - >>> df = pd.DataFrame( - ... [[1, 'a'], [2, 'b'], [3, 'c']], - ... columns=['numeric', 'object'] - ... ) + >>> df = pd.DataFrame([[1, 'a'], [2, 'b'], [3, 'c']], + ... columns=['numeric', 'object']) >>> df.describe() numeric count 3.0
I've made a number of small changes to an earlier pull request revising the describe documentation to bring it closer to pandas official style. I learned this rules as part of a subsequent request expanding the concat documentation and it seemed right to loop back here.
https://api.github.com/repos/pandas-dev/pandas/pulls/15057
2017-01-04T16:31:18Z
2017-01-05T09:44:24Z
2017-01-05T09:44:24Z
2017-01-05T18:20:52Z
BUG: Fix for .str.replace with repl function
diff --git a/doc/source/text.rst b/doc/source/text.rst index 3a4a57ff4da95..52e05c5d511bc 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -146,6 +146,25 @@ following code will cause trouble because of the regular expression meaning of # We need to escape the special character (for >1 len patterns) dollars.str.replace(r'-\$', '-') +The ``replace`` method can also take a callable as replacement. It is called +on every ``pat`` using :func:`re.sub`. The callable should expect one +positional argument (a regex object) and return a string. + +.. versionadded:: 0.20.0 + +.. ipython:: python + + # Reverse every lowercase alphabetic word + pat = r'[a-z]+' + repl = lambda m: m.group(0)[::-1] + pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(pat, repl) + + # Using regex groups + pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)" + repl = lambda m: m.group('two').swapcase() + pd.Series(['Foo Bar Baz', np.nan]).str.replace(pat, repl) + + Indexing with ``.str`` ---------------------- @@ -406,7 +425,7 @@ Method Summary :meth:`~Series.str.join`;Join strings in each element of the Series with passed separator :meth:`~Series.str.get_dummies`;Split strings on the delimiter returning DataFrame of dummy variables :meth:`~Series.str.contains`;Return boolean array if each string contains pattern/regex - :meth:`~Series.str.replace`;Replace occurrences of pattern/regex with some other string + :meth:`~Series.str.replace`;Replace occurrences of pattern/regex with some other string or the return value of a callable given the occurrence :meth:`~Series.str.repeat`;Duplicate values (``s.str.repeat(3)`` equivalent to ``x * 3``) :meth:`~Series.str.pad`;"Add whitespace to left, right, or both sides of strings" :meth:`~Series.str.center`;Equivalent to ``str.center`` diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 2db03724e564d..25dffc9a4960d 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -23,6 +23,7 @@ 
New features ~~~~~~~~~~~~ - Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`. +- ``.str.replace`` now accepts a callable, as replacement, which is passed to ``re.sub`` (:issue:`15055`) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 3041b17b99b17..c48defe39a011 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -167,7 +167,17 @@ def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object): try: convert = not all(mask) result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert) - except (TypeError, AttributeError): + except (TypeError, AttributeError) as e: + # Reraise the exception if callable `f` got wrong number of args. + # The user may want to be warned by this, instead of getting NaN + if compat.PY2: + p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?' + else: + p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ ' + r'(?(3)required )positional arguments?') + + if len(e.args) >= 1 and re.search(p_err, e.args[0]): + raise e def g(x): try: @@ -303,8 +313,13 @@ def str_replace(arr, pat, repl, n=-1, case=True, flags=0): ---------- pat : string Character sequence or regular expression - repl : string - Replacement sequence + repl : string or callable + Replacement string or a callable. The callable is passed the regex + match object and must return a replacement string to be used. + See :func:`re.sub`. + + .. versionadded:: 0.20.0 + n : int, default -1 (all) Number of replacements to make from start case : boolean, default True @@ -315,12 +330,53 @@ def str_replace(arr, pat, repl, n=-1, case=True, flags=0): Returns ------- replaced : Series/Index of objects + + Examples + -------- + When ``repl`` is a string, every ``pat`` is replaced as with + :meth:`str.replace`. NaN value(s) in the Series are left as is. 
+ + >>> Series(['foo', 'fuz', np.nan]).str.replace('f', 'b') + 0 boo + 1 buz + 2 NaN + dtype: object + + When ``repl`` is a callable, it is called on every ``pat`` using + :func:`re.sub`. The callable should expect one positional argument + (a regex object) and return a string. + + To get the idea: + + >>> Series(['foo', 'fuz', np.nan]).str.replace('f', repr) + 0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo + 1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz + 2 NaN + dtype: object + + Reverse every lowercase alphabetic word: + + >>> repl = lambda m: m.group(0)[::-1] + >>> Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl) + 0 oof 123 + 1 rab zab + 2 NaN + dtype: object + + Using regex groups: + + >>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)" + >>> repl = lambda m: m.group('two').swapcase() + >>> Series(['Foo Bar Baz', np.nan]).str.replace(pat, repl) + 0 bAR + 1 NaN + dtype: object """ - # Check whether repl is valid (GH 13438) - if not is_string_like(repl): - raise TypeError("repl must be a string") - use_re = not case or len(pat) > 1 or flags + # Check whether repl is valid (GH 13438, GH 15055) + if not (is_string_like(repl) or callable(repl)): + raise TypeError("repl must be a string or callable") + use_re = not case or len(pat) > 1 or flags or callable(repl) if use_re: if not case: diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index bbcd856250c51..47b64eac33d0b 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -436,6 +436,43 @@ def test_replace(self): values = klass(data) self.assertRaises(TypeError, values.str.replace, 'a', repl) + def test_replace_callable(self): + # GH 15055 + values = Series(['fooBAD__barBAD', NA]) + + # test with callable + repl = lambda m: m.group(0).swapcase() + result = values.str.replace('[a-z][A-Z]{2}', repl, n=2) + exp = Series(['foObaD__baRbaD', NA]) + tm.assert_series_equal(result, exp) + + # test with wrong number of arguments, raising an 
error + if compat.PY2: + p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?' + else: + p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ ' + r'(?(3)required )positional arguments?') + + repl = lambda: None + with tm.assertRaisesRegexp(TypeError, p_err): + values.str.replace('a', repl) + + repl = lambda m, x: None + with tm.assertRaisesRegexp(TypeError, p_err): + values.str.replace('a', repl) + + repl = lambda m, x, y=None: None + with tm.assertRaisesRegexp(TypeError, p_err): + values.str.replace('a', repl) + + # test regex named groups + values = Series(['Foo Bar Baz', NA]) + pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)" + repl = lambda m: m.group('middle').swapcase() + result = values.str.replace(pat, repl) + exp = Series(['bAR', NA]) + tm.assert_series_equal(result, exp) + def test_repeat(self): values = Series(['a', 'b', NA, 'c', NA, 'd'])
.str.replace now accepts a callable (function) as replacement string. It now raises a TypeError when repl is not string like nor a callable. Docstring updated accordingly. - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry - [x] closes #15055
https://api.github.com/repos/pandas-dev/pandas/pulls/15056
2017-01-04T15:30:09Z
2017-01-23T18:11:50Z
null
2017-01-24T08:04:48Z
BUG: make sure that we are passing thru kwargs to groupby
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index c82dc370e3e71..e8a3f52975bc0 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -332,6 +332,7 @@ Bug Fixes +- Bug in groupby operations with timedelta64 when passing ``numeric_only=False`` (:issue:`5724`) - Bug in ``DataFrame.to_html`` with ``index=False`` and ``max_rows`` raising in ``IndexError`` (:issue:`14998`) diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index adc17c7514832..895a376457f09 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -306,12 +306,18 @@ def validate_expanding_func(name, args, kwargs): raise UnsupportedFunctionCall(msg) -def validate_groupby_func(name, args, kwargs): +def validate_groupby_func(name, args, kwargs, allowed=None): """ - 'args' and 'kwargs' should be empty because all of + 'args' and 'kwargs' should be empty, except for allowed + kwargs because all of their necessary parameters are explicitly listed in the function signature """ + if allowed is None: + allowed = [] + + kwargs = set(kwargs) - set(allowed) + if len(args) + len(kwargs) > 0: raise UnsupportedFunctionCall(( "numpy operations are not valid " diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 700e279cb0030..ddf6d95fa2ab4 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -19,6 +19,7 @@ is_categorical_dtype, is_datetimelike, is_datetime_or_timedelta_dtype, + is_datetime64_any_dtype, is_bool, is_integer_dtype, is_complex_dtype, is_bool_dtype, @@ -109,10 +110,12 @@ def _groupby_function(name, alias, npfunc, numeric_only=True, @Substitution(name='groupby', f=name) @Appender(_doc_template) @Appender(_local_template) - def f(self): + def f(self, **kwargs): + if 'numeric_only' not in kwargs: + kwargs['numeric_only'] = numeric_only self._set_group_selection() try: - return self._cython_agg_general(alias, numeric_only=numeric_only) + return 
self._cython_agg_general(alias, alt=npfunc, **kwargs) except AssertionError as e: raise SpecificationError(str(e)) except Exception: @@ -127,7 +130,9 @@ def f(self): def _first_compat(x, axis=0): + def _first(x): + x = np.asarray(x) x = x[notnull(x)] if len(x) == 0: @@ -142,6 +147,7 @@ def _first(x): def _last_compat(x, axis=0): def _last(x): + x = np.asarray(x) x = x[notnull(x)] if len(x) == 0: @@ -775,7 +781,7 @@ def _try_cast(self, result, obj): return result def _cython_transform(self, how, numeric_only=True): - output = {} + output = collections.OrderedDict() for name, obj in self._iterate_slices(): is_numeric = is_numeric_dtype(obj.dtype) if numeric_only and not is_numeric: @@ -783,6 +789,8 @@ def _cython_transform(self, how, numeric_only=True): try: result, names = self.grouper.transform(obj.values, how) + except NotImplementedError: + continue except AssertionError as e: raise GroupByError(str(e)) output[name] = self._try_cast(result, obj) @@ -792,7 +800,7 @@ def _cython_transform(self, how, numeric_only=True): return self._wrap_transformed_output(output, names) - def _cython_agg_general(self, how, numeric_only=True): + def _cython_agg_general(self, how, alt=None, numeric_only=True): output = {} for name, obj in self._iterate_slices(): is_numeric = is_numeric_dtype(obj.dtype) @@ -1015,26 +1023,26 @@ def mean(self, *args, **kwargs): For multiple groupings, the result index will be a MultiIndex """ - nv.validate_groupby_func('mean', args, kwargs) + nv.validate_groupby_func('mean', args, kwargs, ['numeric_only']) try: - return self._cython_agg_general('mean') + return self._cython_agg_general('mean', **kwargs) except GroupByError: raise except Exception: # pragma: no cover self._set_group_selection() - f = lambda x: x.mean(axis=self.axis) + f = lambda x: x.mean(axis=self.axis, **kwargs) return self._python_agg_general(f) @Substitution(name='groupby') @Appender(_doc_template) - def median(self): + def median(self, **kwargs): """ Compute median of groups, 
excluding missing values For multiple groupings, the result index will be a MultiIndex """ try: - return self._cython_agg_general('median') + return self._cython_agg_general('median', **kwargs) except GroupByError: raise except Exception: # pragma: no cover @@ -1044,7 +1052,7 @@ def median(self): def f(x): if isinstance(x, np.ndarray): x = Series(x) - return x.median(axis=self.axis) + return x.median(axis=self.axis, **kwargs) return self._python_agg_general(f) @Substitution(name='groupby') @@ -1063,7 +1071,7 @@ def std(self, ddof=1, *args, **kwargs): # TODO: implement at Cython level? nv.validate_groupby_func('std', args, kwargs) - return np.sqrt(self.var(ddof=ddof)) + return np.sqrt(self.var(ddof=ddof, **kwargs)) @Substitution(name='groupby') @Appender(_doc_template) @@ -1080,10 +1088,10 @@ def var(self, ddof=1, *args, **kwargs): """ nv.validate_groupby_func('var', args, kwargs) if ddof == 1: - return self._cython_agg_general('var') + return self._cython_agg_general('var', **kwargs) else: self._set_group_selection() - f = lambda x: x.var(ddof=ddof) + f = lambda x: x.var(ddof=ddof, **kwargs) return self._python_agg_general(f) @Substitution(name='groupby') @@ -1400,39 +1408,39 @@ def cumcount(self, ascending=True): @Appender(_doc_template) def cumprod(self, axis=0, *args, **kwargs): """Cumulative product for each group""" - nv.validate_groupby_func('cumprod', args, kwargs) + nv.validate_groupby_func('cumprod', args, kwargs, ['numeric_only']) if axis != 0: - return self.apply(lambda x: x.cumprod(axis=axis)) + return self.apply(lambda x: x.cumprod(axis=axis, **kwargs)) - return self._cython_transform('cumprod') + return self._cython_transform('cumprod', **kwargs) @Substitution(name='groupby') @Appender(_doc_template) def cumsum(self, axis=0, *args, **kwargs): """Cumulative sum for each group""" - nv.validate_groupby_func('cumsum', args, kwargs) + nv.validate_groupby_func('cumsum', args, kwargs, ['numeric_only']) if axis != 0: - return self.apply(lambda x: 
x.cumsum(axis=axis)) + return self.apply(lambda x: x.cumsum(axis=axis, **kwargs)) - return self._cython_transform('cumsum') + return self._cython_transform('cumsum', **kwargs) @Substitution(name='groupby') @Appender(_doc_template) - def cummin(self, axis=0): + def cummin(self, axis=0, **kwargs): """Cumulative min for each group""" if axis != 0: return self.apply(lambda x: np.minimum.accumulate(x, axis)) - return self._cython_transform('cummin') + return self._cython_transform('cummin', **kwargs) @Substitution(name='groupby') @Appender(_doc_template) - def cummax(self, axis=0): + def cummax(self, axis=0, **kwargs): """Cumulative max for each group""" if axis != 0: return self.apply(lambda x: np.maximum.accumulate(x, axis)) - return self._cython_transform('cummax') + return self._cython_transform('cummax', **kwargs) @Substitution(name='groupby') @Appender(_doc_template) @@ -1828,6 +1836,28 @@ def wrapper(*args, **kwargs): def _cython_operation(self, kind, values, how, axis): assert kind in ['transform', 'aggregate'] + # can we do this operation with our cython functions + # if not raise NotImplementedError + + # we raise NotImplemented if this is an invalid operation + # entirely, e.g. 
adding datetimes + + # categoricals are only 1d, so we + # are not setup for dim transforming + if is_categorical_dtype(values): + raise NotImplementedError( + "categoricals are not support in cython ops ATM") + elif is_datetime64_any_dtype(values): + if how in ['add', 'prod', 'cumsum', 'cumprod']: + raise NotImplementedError( + "datetime64 type does not support {} " + "operations".format(how)) + elif is_timedelta64_dtype(values): + if how in ['prod', 'cumprod']: + raise NotImplementedError( + "timedelta64 type does not support {} " + "operations".format(how)) + arity = self._cython_arity.get(how, 1) vdim = values.ndim @@ -3155,9 +3185,9 @@ def _iterate_slices(self): continue yield val, slicer(val) - def _cython_agg_general(self, how, numeric_only=True): + def _cython_agg_general(self, how, alt=None, numeric_only=True): new_items, new_blocks = self._cython_agg_blocks( - how, numeric_only=numeric_only) + how, alt=alt, numeric_only=numeric_only) return self._wrap_agged_blocks(new_items, new_blocks) def _wrap_agged_blocks(self, items, blocks): @@ -3183,29 +3213,75 @@ def _wrap_agged_blocks(self, items, blocks): _block_agg_axis = 0 - def _cython_agg_blocks(self, how, numeric_only=True): - data, agg_axis = self._get_data_to_aggregate() + def _cython_agg_blocks(self, how, alt=None, numeric_only=True): + # TODO: the actual managing of mgr_locs is a PITA + # here, it should happen via BlockManager.combine - new_blocks = [] + data, agg_axis = self._get_data_to_aggregate() if numeric_only: data = data.get_numeric_data(copy=False) + new_blocks = [] + new_items = [] + deleted_items = [] for block in data.blocks: - result, _ = self.grouper.aggregate( - block.values, how, axis=agg_axis) + locs = block.mgr_locs.as_array + try: + result, _ = self.grouper.aggregate( + block.values, how, axis=agg_axis) + except NotImplementedError: + # generally if we have numeric_only=False + # and non-applicable functions + # try to python agg + + if alt is None: + # we cannot perform the 
operation + # in an alternate way, exclude the block + deleted_items.append(locs) + continue + + # call our grouper again with only this block + obj = self.obj[data.items[locs]] + s = groupby(obj, self.grouper) + result = s.aggregate(lambda x: alt(x, axis=self.axis)) + result = result._data.blocks[0] # see if we can cast the block back to the original dtype result = block._try_coerce_and_cast_result(result) - newb = make_block(result, placement=block.mgr_locs) + new_items.append(locs) + newb = block.make_block_same_class(result) new_blocks.append(newb) if len(new_blocks) == 0: raise DataError('No numeric types to aggregate') - return data.items, new_blocks + # reset the locs in the blocks to correspond to our + # current ordering + indexer = np.concatenate(new_items) + new_items = data.items.take(np.sort(indexer)) + + if len(deleted_items): + + # we need to adjust the indexer to account for the + # items we have removed + # really should be done in internals :< + + deleted = np.concatenate(deleted_items) + ai = np.arange(len(data)) + mask = np.zeros(len(data)) + mask[deleted] = 1 + indexer = (ai - mask.cumsum())[indexer] + + offset = 0 + for b in new_blocks: + l = len(b.mgr_locs) + b.mgr_locs = indexer[offset:(offset + l)] + offset += l + + return new_items, new_blocks def _get_data_to_aggregate(self): obj = self._obj_with_exclusions diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index b00dc62206f57..f8a1e5a684858 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2257,9 +2257,131 @@ def test_max_min_non_numeric(self): result = aa.groupby('nn').max() self.assertTrue('ss' in result) + result = aa.groupby('nn').max(numeric_only=False) + self.assertTrue('ss' in result) + result = aa.groupby('nn').min() self.assertTrue('ss' in result) + result = aa.groupby('nn').min(numeric_only=False) + self.assertTrue('ss' in result) + + def test_arg_passthru(self): + # make sure that we are 
passing thru kwargs + # to our agg functions + + # GH3668 + # GH5724 + df = pd.DataFrame( + {'group': [1, 1, 2], + 'int': [1, 2, 3], + 'float': [4., 5., 6.], + 'string': list('abc'), + 'category_string': pd.Series(list('abc')).astype('category'), + 'category_int': [7, 8, 9], + 'datetime': pd.date_range('20130101', periods=3), + 'datetimetz': pd.date_range('20130101', + periods=3, + tz='US/Eastern'), + 'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')}, + columns=['group', 'int', 'float', 'string', + 'category_string', 'category_int', + 'datetime', 'datetimetz', + 'timedelta']) + + expected_columns_numeric = Index(['int', 'float', 'category_int']) + + # mean / median + expected = pd.DataFrame( + {'category_int': [7.5, 9], + 'float': [4.5, 6.], + 'timedelta': [pd.Timedelta('1.5s'), + pd.Timedelta('3s')], + 'int': [1.5, 3], + 'datetime': [pd.Timestamp('2013-01-01 12:00:00'), + pd.Timestamp('2013-01-03 00:00:00')], + 'datetimetz': [ + pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'), + pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]}, + index=Index([1, 2], name='group'), + columns=['int', 'float', 'category_int', + 'datetime', 'datetimetz', 'timedelta']) + for attr in ['mean', 'median']: + f = getattr(df.groupby('group'), attr) + result = f() + tm.assert_index_equal(result.columns, expected_columns_numeric) + + result = f(numeric_only=False) + assert_frame_equal(result.reindex_like(expected), expected) + + # TODO: min, max *should* handle + # categorical (ordered) dtype + expected_columns = Index(['int', 'float', 'string', + 'category_int', + 'datetime', 'datetimetz', + 'timedelta']) + for attr in ['min', 'max']: + f = getattr(df.groupby('group'), attr) + result = f() + tm.assert_index_equal(result.columns, expected_columns) + + result = f(numeric_only=False) + tm.assert_index_equal(result.columns, expected_columns) + + expected_columns = Index(['int', 'float', 'string', + 'category_string', 'category_int', + 'datetime', 'datetimetz', + 
'timedelta']) + for attr in ['first', 'last']: + f = getattr(df.groupby('group'), attr) + result = f() + tm.assert_index_equal(result.columns, expected_columns) + + result = f(numeric_only=False) + tm.assert_index_equal(result.columns, expected_columns) + + expected_columns = Index(['int', 'float', 'string', + 'category_int', 'timedelta']) + for attr in ['sum']: + f = getattr(df.groupby('group'), attr) + result = f() + tm.assert_index_equal(result.columns, expected_columns_numeric) + + result = f(numeric_only=False) + tm.assert_index_equal(result.columns, expected_columns) + + expected_columns = Index(['int', 'float', 'category_int']) + for attr in ['prod', 'cumprod']: + f = getattr(df.groupby('group'), attr) + result = f() + tm.assert_index_equal(result.columns, expected_columns_numeric) + + result = f(numeric_only=False) + tm.assert_index_equal(result.columns, expected_columns) + + # like min, max, but don't include strings + expected_columns = Index(['int', 'float', + 'category_int', + 'datetime', 'datetimetz', + 'timedelta']) + for attr in ['cummin', 'cummax']: + f = getattr(df.groupby('group'), attr) + result = f() + tm.assert_index_equal(result.columns, expected_columns_numeric) + + result = f(numeric_only=False) + tm.assert_index_equal(result.columns, expected_columns) + + expected_columns = Index(['int', 'float', 'category_int', + 'timedelta']) + for attr in ['cumsum']: + f = getattr(df.groupby('group'), attr) + result = f() + tm.assert_index_equal(result.columns, expected_columns_numeric) + + result = f(numeric_only=False) + tm.assert_index_equal(result.columns, expected_columns) + def test_cython_agg_boolean(self): frame = DataFrame({'a': np.random.randint(0, 5, 50), 'b': np.random.randint(0, 2, 50).astype('bool')}) @@ -3436,6 +3558,7 @@ def test_int64_overflow(self): tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' ]].values)) tups = com._asarray_tuplesafe(tups) + expected = df.groupby(tups).sum()['values'] for k, v in 
compat.iteritems(expected):
BUG: allow timedelta64 to work in groupby with numeric_only=False closes #5724 ``` In [6]: df = pd.DataFrame( ...: {'group': [1, 1, 2], ...: 'int': [1, 2, 3], ...: 'float': [4., 5., 6.], ...: 'string': list('abc'), ...: 'category_string': pd.Series(list('abc')).astype('category'), ...: 'category_int': [7, 8, 9], ...: 'datetime': pd.date_range('20130101', periods=3), ...: 'datetimetz': pd.date_range('20130101', ...: periods=3, ...: tz='US/Eastern'), ...: 'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')}, ...: columns=['group', 'int', 'float', 'string', ...: 'category_string', 'category_int', ...: 'datetime', 'datetimetz', ...: 'timedelta']) In [7]: df Out[7]: group int float string category_string category_int datetime datetimetz timedelta 0 1 1 4.0 a a 7 2013-01-01 2013-01-01 00:00:00-05:00 00:00:01 1 1 2 5.0 b b 8 2013-01-02 2013-01-02 00:00:00-05:00 00:00:02 2 2 3 6.0 c c 9 2013-01-03 2013-01-03 00:00:00-05:00 00:00:03 # same as in master In [8]: df.groupby('group').mean() Out[8]: int float category_int group 1 1.5 4.5 7.5 2 3.0 6.0 9.0 # works In [9]: df.groupby('group').mean(numeric_only=False) Out[9]: int float category_int datetime datetimetz timedelta group 1 1.5 4.5 7.5 2013-01-01 12:00:00 2013-01-01 12:00:00-05:00 00:00:01.500000 2 3.0 6.0 9.0 2013-01-03 00:00:00 2013-01-03 00:00:00-05:00 00:00:03 # same as in master In [10]: df.groupby('group').sum() Out[10]: int float category_int group 1 3 9.0 15 2 3 6.0 9 # works In [11]: df.groupby('group').sum(numeric_only=False) Out[11]: int float string category_int timedelta group 1 3 9.0 ab 15 00:00:03 2 3 6.0 c 9 00:00:03 # same as in master In [13]: df.groupby('group').min() Out[13]: int float string category_int datetime datetimetz timedelta group 1 1 4.0 a 7 2013-01-01 2013-01-01 00:00:00-05:00 00:00:01 2 3 6.0 c 9 2013-01-03 2013-01-03 00:00:00-05:00 00:00:03 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/15054
2017-01-04T12:51:12Z
2017-01-18T16:14:35Z
null
2017-01-18T16:14:36Z
PERF: Cythonize Groupby.cummin/cummax (#15048)
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index ad58cd0fc6d70..597b040b8075c 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -690,6 +690,3 @@ def time_shift(self): def time_transform_dataframe(self): # GH 12737 self.df_nans.groupby('key').transform('first') - - - diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index c9ea7b427b3f2..e7f61289db4fb 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -283,7 +283,7 @@ Performance Improvements - Increased performance of ``pd.factorize()`` by releasing the GIL with ``object`` dtype when inferred as strings (:issue:`14859`) - Improved performance of timeseries plotting with an irregular DatetimeIndex (or with ``compat_x=True``) (:issue:`15073`). - +- Improved performance of ``groupby().cummin()`` and ``groupby().cummax()`` (:issue:`15048`) - When reading buffer object in ``read_sas()`` method without specified format, filepath string is inferred rather than buffer object. 
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 7eba32b4932d0..700e279cb0030 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -75,7 +75,7 @@ 'last', 'first', 'head', 'tail', 'median', 'mean', 'sum', 'min', 'max', - 'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount', + 'cumcount', 'resample', 'describe', 'rank', 'quantile', @@ -97,7 +97,8 @@ _dataframe_apply_whitelist = \ _common_apply_whitelist | frozenset(['dtypes', 'corrwith']) -_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift']) +_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift', + 'cummin', 'cummax']) def _groupby_function(name, alias, npfunc, numeric_only=True, @@ -1415,6 +1416,24 @@ def cumsum(self, axis=0, *args, **kwargs): return self._cython_transform('cumsum') + @Substitution(name='groupby') + @Appender(_doc_template) + def cummin(self, axis=0): + """Cumulative min for each group""" + if axis != 0: + return self.apply(lambda x: np.minimum.accumulate(x, axis)) + + return self._cython_transform('cummin') + + @Substitution(name='groupby') + @Appender(_doc_template) + def cummax(self, axis=0): + """Cumulative max for each group""" + if axis != 0: + return self.apply(lambda x: np.maximum.accumulate(x, axis)) + + return self._cython_transform('cummax') + @Substitution(name='groupby') @Appender(_doc_template) def shift(self, periods=1, freq=None, axis=0): @@ -1752,6 +1771,8 @@ def get_group_levels(self): 'transform': { 'cumprod': 'group_cumprod', 'cumsum': 'group_cumsum', + 'cummin': 'group_cummin', + 'cummax': 'group_cummax', } } diff --git a/pandas/src/algos_groupby_helper.pxi.in b/pandas/src/algos_groupby_helper.pxi.in index 5c704436ce3a0..70862e198edba 100644 --- a/pandas/src/algos_groupby_helper.pxi.in +++ b/pandas/src/algos_groupby_helper.pxi.in @@ -568,6 +568,76 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, else: out[i, j] = minx[i, j] + +@cython.boundscheck(False) +@cython.wraparound(False) +def 
group_cummin_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, + ndarray[{{dest_type2}}, ndim=2] values, + ndarray[int64_t] labels, + ndarray[{{dest_type2}}, ndim=2] accum): + """ + Only transforms on axis=0 + """ + cdef: + Py_ssize_t i, j, N, K, size + {{dest_type2}} val, min_val + int64_t lab + + N, K = (<object> values).shape + accum.fill({{inf_val}}) + + with nogil: + for i in range(N): + lab = labels[i] + + if lab < 0: + continue + for j in range(K): + val = values[i, j] + if val == val: + if val < accum[lab, j]: + min_val = val + accum[lab, j] = min_val + out[i, j] = accum[lab, j] + # val = nan + else: + out[i, j] = {{nan_val}} + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, + ndarray[{{dest_type2}}, ndim=2] values, + ndarray[int64_t] labels, + ndarray[{{dest_type2}}, ndim=2] accum): + """ + Only transforms on axis=0 + """ + cdef: + Py_ssize_t i, j, N, K, size + {{dest_type2}} val, max_val + int64_t lab + + N, K = (<object> values).shape + accum.fill(-{{inf_val}}) + + with nogil: + for i in range(N): + lab = labels[i] + + if lab < 0: + continue + for j in range(K): + val = values[i, j] + if val == val: + if val > accum[lab, j]: + max_val = val + accum[lab, j] = max_val + out[i, j] = accum[lab, j] + # val = nan + else: + out[i, j] = {{nan_val}} + {{endfor}} #---------------------------------------------------------------------- diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index e87b5d04271e8..3cde3b81fea3b 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -4977,10 +4977,6 @@ def test_groupby_whitelist(self): 'max', 'head', 'tail', - 'cumsum', - 'cumprod', - 'cummin', - 'cummax', 'cumcount', 'resample', 'describe', @@ -5018,10 +5014,6 @@ def test_groupby_whitelist(self): 'max', 'head', 'tail', - 'cumsum', - 'cumprod', - 'cummin', - 'cummax', 'cumcount', 'resample', 'describe', @@ -5777,6 +5769,85 @@ def 
test_agg_over_numpy_arrays(self): assert_frame_equal(result, expected) + def test_cummin_cummax(self): + # GH 15048 + num_types = [np.int32, np.int64, np.float32, np.float64] + num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min, + np.finfo(np.float32).min, np.finfo(np.float64).min] + num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max, + np.finfo(np.float32).max, np.finfo(np.float64).max] + base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2], + 'B': [3, 4, 3, 2, 2, 3, 2, 1]}) + expected_mins = [3, 3, 3, 2, 2, 2, 2, 1] + expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3] + + for dtype, min_val, max_val in zip(num_types, num_mins, num_max): + df = base_df.astype(dtype) + + # cummin + expected = pd.DataFrame({'B': expected_mins}).astype(dtype) + result = df.groupby('A').cummin() + tm.assert_frame_equal(result, expected) + result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame() + tm.assert_frame_equal(result, expected) + + # Test cummin w/ min value for dtype + df.loc[[2, 6], 'B'] = min_val + expected.loc[[2, 3, 6, 7], 'B'] = min_val + result = df.groupby('A').cummin() + tm.assert_frame_equal(result, expected) + expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame() + tm.assert_frame_equal(result, expected) + + # cummax + expected = pd.DataFrame({'B': expected_maxs}).astype(dtype) + result = df.groupby('A').cummax() + tm.assert_frame_equal(result, expected) + result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame() + tm.assert_frame_equal(result, expected) + + # Test cummax w/ max value for dtype + df.loc[[2, 6], 'B'] = max_val + expected.loc[[2, 3, 6, 7], 'B'] = max_val + result = df.groupby('A').cummax() + tm.assert_frame_equal(result, expected) + expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame() + tm.assert_frame_equal(result, expected) + + # Test nan in some values + base_df.loc[[0, 2, 4, 6], 'B'] = np.nan + expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2, + np.nan, 3, np.nan, 1]}) + result = 
base_df.groupby('A').cummin() + tm.assert_frame_equal(result, expected) + expected = (base_df.groupby('A') + .B + .apply(lambda x: x.cummin()) + .to_frame()) + tm.assert_frame_equal(result, expected) + + expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4, + np.nan, 3, np.nan, 3]}) + result = base_df.groupby('A').cummax() + tm.assert_frame_equal(result, expected) + expected = (base_df.groupby('A') + .B + .apply(lambda x: x.cummax()) + .to_frame()) + tm.assert_frame_equal(result, expected) + + # Test nan in entire column + base_df['B'] = np.nan + expected = pd.DataFrame({'B': [np.nan] * 8}) + result = base_df.groupby('A').cummin() + tm.assert_frame_equal(expected, result) + result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame() + tm.assert_frame_equal(expected, result) + result = base_df.groupby('A').cummax() + tm.assert_frame_equal(expected, result) + result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame() + tm.assert_frame_equal(expected, result) + def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all()
- [x] closes #15048 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry ``` before after ratio [6eb705f5] [e8115142] - 1.08s 1.74ms 0.00 groupby.groupby_cummax_cummin.time_cummax - 1.09s 1.73ms 0.00 groupby.groupby_cummax_cummin.time_cummin SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. ``` Still need to add some tests to see if this is equivalent to the old implimentation (numpy.minimum.accumulate?). Any early feedback appreciated.
https://api.github.com/repos/pandas-dev/pandas/pulls/15053
2017-01-04T08:25:44Z
2017-01-11T13:22:03Z
null
2017-12-20T02:04:03Z
DOC: Add example of skipcols in read_csv
diff --git a/doc/source/io.rst b/doc/source/io.rst index 66296804cba5f..9683fedb78303 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -644,6 +644,15 @@ file, either using the column names, position numbers or a callable: pd.read_csv(StringIO(data), usecols=[0, 2, 3]) pd.read_csv(StringIO(data), usecols=lambda x: x.upper() in ['A', 'C']) +The ``usecols`` argument can also be used to specify which columns not to +use in the final result: + +.. ipython:: python + pd.read_csv(StringIO(data), usecols=lambda x: x not in ['a', 'c']) + +In this case, the callable is specifying that we exclude the "a" and "c" +columns from the output. + Comments and Empty Lines ''''''''''''''''''''''''
Illustrate how we can use the `usecols` argument to skip particular columns. Closes #10882.
https://api.github.com/repos/pandas-dev/pandas/pulls/15052
2017-01-04T07:01:45Z
2017-01-04T22:28:13Z
2017-01-04T22:28:13Z
2017-01-04T22:29:42Z
TST: Add new test for flaky usecols
diff --git a/pandas/io/tests/parser/usecols.py b/pandas/io/tests/parser/usecols.py index 4fb6ff00e2d7b..c654859f8dc7d 100644 --- a/pandas/io/tests/parser/usecols.py +++ b/pandas/io/tests/parser/usecols.py @@ -200,6 +200,26 @@ def test_usecols_with_parse_dates(self): parse_dates=parse_dates) tm.assert_frame_equal(df, expected) + # See gh-13604 + s = """2008-02-07 09:40,1032.43 + 2008-02-07 09:50,1042.54 + 2008-02-07 10:00,1051.65 + """ + parse_dates = [0] + names = ['date', 'values'] + usecols = names[:] + + index = Index([Timestamp('2008-02-07 09:40'), + Timestamp('2008-02-07 09:50'), + Timestamp('2008-02-07 10:00')], + name='date') + cols = {'values': [1032.43, 1042.54, 1051.65]} + expected = DataFrame(cols, index=index) + + df = self.read_csv(StringIO(s), parse_dates=parse_dates, index_col=0, + usecols=usecols, header=None, names=names) + tm.assert_frame_equal(df, expected) + # See gh-14792 s = """a,b,c,d,e,f,g,h,i,j 2016/09/21,1,1,2,3,4,5,6,7,8"""
Title is self-explanatory. xref #14984. Closes #13604.
https://api.github.com/repos/pandas-dev/pandas/pulls/15051
2017-01-04T06:51:20Z
2017-01-04T08:24:51Z
2017-01-04T08:24:51Z
2017-01-04T08:34:46Z
ERR: Disallow multi-char quotechar for C engine
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 2db03724e564d..23f2589adde89 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -246,6 +246,7 @@ Other API Changes - ``DataFrame.applymap()`` with an empty ``DataFrame`` will return a copy of the empty ``DataFrame`` instead of a ``Series`` (:issue:`8222`) - ``pd.read_csv()`` will now issue a ``ParserWarning`` whenever there are conflicting values provided by the ``dialect`` parameter and the user (:issue:`14898`) +- ``pd.read_csv()`` will now raise a ``ValueError`` for the C engine if the quote character is larger than than one byte (:issue:`11592`) .. _whatsnew_0200.deprecations: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 040ec3d803303..8a9873b240602 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -841,6 +841,17 @@ def _clean_options(self, options, engine): encoding=encoding) engine = 'python' + quotechar = options['quotechar'] + if (quotechar is not None and + isinstance(quotechar, (str, compat.text_type, bytes))): + if (len(quotechar) == 1 and ord(quotechar) > 127 and + engine not in ('python', 'python-fwf')): + fallback_reason = ("ord(quotechar) > 127, meaning the " + "quotechar is larger than one byte, " + "and the 'c' engine does not support " + "such quotechars") + engine = 'python' + if fallback_reason and engine_specified: raise ValueError(fallback_reason) diff --git a/pandas/io/tests/parser/quoting.py b/pandas/io/tests/parser/quoting.py index 765cec8243a0a..a692e03e868c7 100644 --- a/pandas/io/tests/parser/quoting.py +++ b/pandas/io/tests/parser/quoting.py @@ -149,5 +149,5 @@ def test_quotechar_unicode(self): # Compared to Python 3.x, Python 2.x does not handle unicode well. 
if PY3: - result = self.read_csv(StringIO(data), quotechar=u('\u0394')) + result = self.read_csv(StringIO(data), quotechar=u('\u0001')) tm.assert_frame_equal(result, expected) diff --git a/pandas/io/tests/parser/test_unsupported.py b/pandas/io/tests/parser/test_unsupported.py index 64f31a11440d8..4d93df16a0279 100644 --- a/pandas/io/tests/parser/test_unsupported.py +++ b/pandas/io/tests/parser/test_unsupported.py @@ -50,12 +50,16 @@ def test_c_engine(self): sep=None, delim_whitespace=False) with tm.assertRaisesRegexp(ValueError, msg): read_table(StringIO(data), engine='c', sep=r'\s') + with tm.assertRaisesRegexp(ValueError, msg): + read_table(StringIO(data), engine='c', quotechar=chr(128)) with tm.assertRaisesRegexp(ValueError, msg): read_table(StringIO(data), engine='c', skipfooter=1) # specify C-unsupported options without python-unsupported options with tm.assert_produces_warning(parsers.ParserWarning): read_table(StringIO(data), sep=None, delim_whitespace=False) + with tm.assert_produces_warning(parsers.ParserWarning): + read_table(StringIO(data), quotechar=chr(128)) with tm.assert_produces_warning(parsers.ParserWarning): read_table(StringIO(data), sep=r'\s') with tm.assert_produces_warning(parsers.ParserWarning):
Raise `ValueError` or issue `ParserWarning` (as we do with other unsupported features) when a multi-char `quotechar` is passed in, and the C engine is used. Closes #11592.
https://api.github.com/repos/pandas-dev/pandas/pulls/15050
2017-01-04T06:22:54Z
2017-01-05T22:20:25Z
2017-01-05T22:20:24Z
2017-01-05T23:34:46Z
BF: define expected Series of correct for arch (eg i386) int
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 1b373baf9b3c1..dd4e0869e9627 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -127,6 +127,13 @@ def test_ndarray_compat_properties(self): values = idx.values for prop in self._compat_props: + # skip equivalency check if converted type is object, as happens + # for PeriodIndex, since then object (address) would be 4 bytes + # on 32bit platforms and equivalence to int64 of original + # date time is just accidental + if prop in ('itemsize', 'nbytes') \ + and values.dtype.name == 'object': + continue self.assertEqual(getattr(idx, prop), getattr(values, prop)) # test for validity diff --git a/pandas/tools/tests/test_tile.py b/pandas/tools/tests/test_tile.py index 5c7cee862ccd3..4fd53c1f696af 100644 --- a/pandas/tools/tests/test_tile.py +++ b/pandas/tools/tests/test_tile.py @@ -288,7 +288,10 @@ def test_qcut_duplicates_bin(self): def test_single_bin(self): # issue 14652 - expected = Series([0, 0]) + # Explicit dtype since Series produces int64 for ints, while cut + # (due to numpy.searchsorted) would use int32 on i386, so let's assure + # correct default to the architecture int + expected = Series([0, 0], dtype='intp') s = Series([9., 9.]) result = cut(s, 1, labels=False)
- [x] Closes #14866 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry (I believe it is nothing new ;) )
https://api.github.com/repos/pandas-dev/pandas/pulls/15044
2017-01-03T17:11:11Z
2017-04-03T15:22:26Z
null
2017-04-03T15:22:35Z
TST: Test empty method and simplify logic
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 96a13b0cb8aca..5f8be9632cbe1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -909,7 +909,7 @@ def empty(self): pandas.Series.dropna pandas.DataFrame.dropna """ - return not all(len(self._get_axis(a)) > 0 for a in self._AXIS_ORDERS) + return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) def __nonzero__(self): raise ValueError("The truth value of a {0} is ambiguous. " diff --git a/pandas/tests/frame/test_misc_api.py b/pandas/tests/frame/test_misc_api.py index 089b71b30119b..bc750727493a3 100644 --- a/pandas/tests/frame/test_misc_api.py +++ b/pandas/tests/frame/test_misc_api.py @@ -410,9 +410,18 @@ def test_series_put_names(self): def test_empty_nonzero(self): df = DataFrame([1, 2, 3]) self.assertFalse(df.empty) + df = pd.DataFrame(index=[1], columns=[1]) + self.assertFalse(df.empty) df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna() self.assertTrue(df.empty) self.assertTrue(df.T.empty) + empty_frames = [pd.DataFrame(), + pd.DataFrame(index=[1]), + pd.DataFrame(columns=[1]), + pd.DataFrame({1: []})] + for df in empty_frames: + self.assertTrue(df.empty) + self.assertTrue(df.T.empty) def test_inplace_return_self(self): # re #1893 diff --git a/pandas/tests/series/test_misc_api.py b/pandas/tests/series/test_misc_api.py index 61bdc59cd500d..b1b06cc7be8a4 100644 --- a/pandas/tests/series/test_misc_api.py +++ b/pandas/tests/series/test_misc_api.py @@ -343,3 +343,10 @@ def test_str_attribute(self): s = Series(range(5)) with self.assertRaisesRegexp(AttributeError, 'only use .str accessor'): s.str.repeat(2) + + def test_empty_method(self): + s_empty = pd.Series() + tm.assert_equal(s_empty.empty, True) + + for full_series in [pd.Series([1]), pd.Series(index=[1])]: + tm.assert_equal(full_series.empty, False)
- [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` Since the docstring of `.empty` states > True if NDFrame is entirely empty [no items], meaning any of the axes are of length 0. I was wondering if the logic could be changed accordingly for code clarity. Unless it was set as `not all(len(self._get_axis(a)) > 0 for a in self._AXIS_ORDERS)` for a particular reason. Also this method did not appear to have any test.
https://api.github.com/repos/pandas-dev/pandas/pulls/15043
2017-01-03T07:59:46Z
2017-01-04T14:36:58Z
2017-01-04T14:36:58Z
2017-12-20T02:04:03Z
BUG: Indexing MultiIndex with Series failed.
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 49d0a6efe6781..c57d92755b129 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -297,7 +297,7 @@ Bug Fixes - Bug in ``pd.to_numeric()`` in which float and unsigned integer elements were being improperly casted (:issue:`14941`, :issue:`15005`) - Bug in ``pd.read_csv()`` in which the ``dialect`` parameter was not being verified before processing (:issue:`14898`) - +- Bug in ``DataFrame`` where indexing a ``MultiIndex`` using a ``Series`` failed (:issue:`14730`) - Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`) - Bug in ``Series.ffill()`` with mixed dtypes containing tz-aware datetimes. (:issue:`14956`) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index dad5bf5bc70ba..9fa5b67083b2d 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1461,6 +1461,9 @@ def _getitem_axis(self, key, axis=0): if isinstance(labels, MultiIndex): if (not isinstance(key, tuple) and len(key) > 1 and not isinstance(key[0], tuple)): + if isinstance(key, ABCSeries): + # GH 14730 + key = list(key) key = tuple([key]) # an iterable multi-selection diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index ccbe65e58a1a5..7da3cb377e63d 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -1,26 +1,29 @@ # -*- coding: utf-8 -*- -from datetime import timedelta -from itertools import product -import nose import re import warnings -from pandas import (DataFrame, date_range, period_range, MultiIndex, Index, - CategoricalIndex, compat) -from pandas.core.common import PerformanceWarning, UnsortedIndexError -from pandas.indexes.base import InvalidIndexError -from pandas.compat import range, lrange, u, PY3, long, lzip +from datetime import timedelta +from itertools import product + +import nose import numpy as np -from 
pandas.util.testing import (assert_almost_equal, assertRaises, - assertRaisesRegexp, assert_copy) +import pandas as pd + +from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex, + compat, date_range, period_range) +from pandas.compat import PY3, long, lrange, lzip, range, u +from pandas.core.common import PerformanceWarning, UnsortedIndexError +from pandas.indexes.base import InvalidIndexError +from pandas.lib import Timestamp import pandas.util.testing as tm -import pandas as pd -from pandas.lib import Timestamp +from pandas.util.testing import (assertRaises, assertRaisesRegexp, + assert_almost_equal, assert_copy) + from .common import Base diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 4e7ace4173227..8f4eaa0338c80 100755 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -690,6 +690,25 @@ def test_getitem_partial(self): expected.columns = expected.columns.droplevel(0).droplevel(0) assert_frame_equal(result, expected) + def test_series_index(self): + # GH14730 + index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']]) + x = Series(index=index, data=range(9), dtype=np.float64) + y = Series([1, 3]) + expected = Series( + data=[0, 1, 2, 6, 7, 8], + index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]), + dtype=np.float64) + result = x.loc[y] + result2 = x.loc[[1, 3]] + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + empty_series = Series(data=[], dtype=np.float64) + expected2 = Series([], index=MultiIndex( + levels=index.levels, labels=[[], []], dtype=np.float64)) + result3 = x.loc[empty_series] + tm.assert_series_equal(result3, expected2) + def test_getitem_slice_not_sorted(self): df = self.frame.sortlevel(1).T
Previously, accessing elements of a MultiIndex-indexed DataFrame with a Series failed. This changes that behavior so that it is possible to use a Series to access elements from a MultiIndex-indexed DataFrame, just as one would use a list. - [x] closes #14730 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/15041
2017-01-03T01:38:35Z
2017-01-04T00:00:09Z
null
2017-01-04T00:00:28Z
DOC: Typing error in ExcelFile class example
diff --git a/doc/source/io.rst b/doc/source/io.rst index af05a89a54a62..3b8a97e3d189a 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2372,7 +2372,7 @@ read into memory only once. .. code-block:: python - xlsx = pd.ExcelFile('path_to_file.xls) + xlsx = pd.ExcelFile('path_to_file.xls') df = pd.read_excel(xlsx, 'Sheet1') The ``ExcelFile`` class can also be used as a context manager.
- [x] closes #15037 - [ ] tests added / passed - [ ] passes ``git diff upstream/master | flake8 --diff`` - [ ] whatsnew entry - Tests with local build of documentation passed - Flake8 seems to be relevant for source code changes, only - Thought that a whatsnew entry is not relevant in this case
https://api.github.com/repos/pandas-dev/pandas/pulls/15040
2017-01-02T19:32:57Z
2017-01-02T19:42:06Z
2017-01-02T19:42:06Z
2017-01-02T19:42:11Z
WIP/ENH: add weights kw to numeric aggregation functions
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index adc17c7514832..6eee7e74a7df2 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -306,13 +306,16 @@ def validate_expanding_func(name, args, kwargs): raise UnsupportedFunctionCall(msg) -def validate_groupby_func(name, args, kwargs): +def validate_groupby_func(name, args, kwargs, allowed_kwargs=None): """ - 'args' and 'kwargs' should be empty because all of + 'args' should be empty because all of their necessary parameters are explicitly listed in the function signature """ - if len(args) + len(kwargs) > 0: + if allowed_kwargs: + kwargs = set(kwargs) - set(allowed_kwargs) + + if len(args) or len(kwargs): raise UnsupportedFunctionCall(( "numpy operations are not valid " "with groupby. Use .groupby(...)." diff --git a/pandas/core/base.py b/pandas/core/base.py index 49e43a60403ca..0cc3782d5db3f 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -910,12 +910,18 @@ def hasnans(self): return isnull(self).any() def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + weights=None, filter_type=None, **kwds): """ perform the reduction type operation if we can """ func = getattr(self, name, None) if func is None: raise TypeError("{klass} cannot perform the operation {op}".format( klass=self.__class__.__name__, op=name)) + + if weights is not None: + from pandas.tools import weightby + _, weights = weightby.weightby(self, weights=weights, axis=axis) + kwds['weights'] = weights + return func(**kwds) def value_counts(self, normalize=False, sort=True, ascending=False, diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 0562736038483..ec3d4510cda5b 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -1747,7 +1747,7 @@ def _reverse_indexer(self): # reduction ops # def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + 
weights=None, filter_type=None, **kwds): """ perform the reduction type operation """ func = getattr(self, name, None) if func is None: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d96fb094f5d5c..5b9a8481c3a05 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4894,10 +4894,15 @@ def _count_level(self, level, axis=0, numeric_only=False): else: return result - def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + def _reduce(self, op, name, axis=0, skipna=True, weights=None, + numeric_only=None, filter_type=None, **kwds): axis = self._get_axis_number(axis) + if weights is not None: + from pandas.tools import weightby + self, weights = weightby.weightby(self, weights=weights, axis=axis) + kwds['weights'] = weights + def f(x): return op(x, axis=axis, skipna=skipna, **kwds) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 105f9f93f4ca8..46a3868654cd9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4,6 +4,7 @@ import operator import weakref import gc +from textwrap import dedent import numpy as np import pandas.lib as lib @@ -50,6 +51,7 @@ from pandas.compat.numpy import function as nv from pandas.compat import (map, zip, lrange, string_types, isidentifier, set_function_name) +from pandas.tools import weightby import pandas.core.nanops as nanops from pandas.util.decorators import Appender, Substitution, deprecate_kwarg from pandas.core import config @@ -2541,146 +2543,91 @@ def tail(self, n=5): return self.iloc[0:0] return self.iloc[-n:] - def sample(self, n=None, frac=None, replace=False, weights=None, - random_state=None, axis=None): - """ - Returns a random sample of items from an axis of object. - - .. versionadded:: 0.16.1 - - Parameters - ---------- - n : int, optional - Number of items from axis to return. Cannot be used with `frac`. - Default = 1 if `frac` = None. - frac : float, optional - Fraction of axis items to return. Cannot be used with `n`. 
- replace : boolean, optional - Sample with or without replacement. Default = False. - weights : str or ndarray-like, optional - Default 'None' results in equal probability weighting. - If passed a Series, will align with target object on index. Index - values in weights not found in sampled object will be ignored and - index values in sampled object not in weights will be assigned - weights of zero. - If called on a DataFrame, will accept the name of a column - when axis = 0. - Unless weights are a Series, weights must be same length as axis - being sampled. - If weights do not sum to 1, they will be normalized to sum to 1. - Missing values in the weights column will be treated as zero. - inf and -inf values not allowed. - random_state : int or numpy.random.RandomState, optional - Seed for the random number generator (if int), or numpy RandomState - object. - axis : int or string, optional - Axis to sample. Accepts axis number or name. Default is stat axis - for given data type (0 for Series and DataFrames, 1 for Panels). + _shared_docs['sample'] = dedent("""Returns a random sample of items from an axis of object. - Returns - ------- - A new object of same type as caller. +.. versionadded:: 0.16.1 - Examples - -------- - - Generate an example ``Series`` and ``DataFrame``: - - >>> s = pd.Series(np.random.randn(50)) - >>> s.head() - 0 -0.038497 - 1 1.820773 - 2 -0.972766 - 3 -1.598270 - 4 -1.095526 - dtype: float64 - >>> df = pd.DataFrame(np.random.randn(50, 4), columns=list('ABCD')) - >>> df.head() - A B C D - 0 0.016443 -2.318952 -0.566372 -1.028078 - 1 -1.051921 0.438836 0.658280 -0.175797 - 2 -1.243569 -0.364626 -0.215065 0.057736 - 3 1.768216 0.404512 -0.385604 -1.457834 - 4 1.072446 -1.137172 0.314194 -0.046661 - - Next extract a random sample from both of these objects... 
- - 3 random elements from the ``Series``: - - >>> s.sample(n=3) - 27 -0.994689 - 55 -1.049016 - 67 -0.224565 - dtype: float64 +Parameters +---------- +n : int, optional + Number of items from axis to return. Cannot be used with `frac`. + Default = 1 if `frac` = None. +frac : float, optional + Fraction of axis items to return. Cannot be used with `n`. +replace : boolean, optional + Sample with or without replacement. Default = False. +%(weights)s +random_state : int or numpy.random.RandomState, optional + Seed for the random number generator (if int), or numpy RandomState + object. +axis : int or string, optional + Axis to sample. Accepts axis number or name. Default is stat axis + for given data type (0 for Series and DataFrames, 1 for Panels). - And a random 10% of the ``DataFrame`` with replacement: +Returns +------- +A new object of same type as caller. - >>> df.sample(frac=0.1, replace=True) - A B C D - 35 1.981780 0.142106 1.817165 -0.290805 - 49 -1.336199 -0.448634 -0.789640 0.217116 - 40 0.823173 -0.078816 1.009536 1.015108 - 15 1.421154 -0.055301 -1.922594 -0.019696 - 6 -0.148339 0.832938 1.787600 -1.383767 - """ +Examples +-------- +Generate an example ``Series`` and ``DataFrame``: + +>>> s = pd.Series(np.random.randn(50)) +>>> s.head() +0 -0.038497 +1 1.820773 +2 -0.972766 +3 -1.598270 +4 -1.095526 +dtype: float64 +>>> df = pd.DataFrame(np.random.randn(50, 4), columns=list('ABCD')) +>>> df.head() + A B C D +0 0.016443 -2.318952 -0.566372 -1.028078 +1 -1.051921 0.438836 0.658280 -0.175797 +2 -1.243569 -0.364626 -0.215065 0.057736 +3 1.768216 0.404512 -0.385604 -1.457834 +4 1.072446 -1.137172 0.314194 -0.046661 + +Next extract a random sample from both of these objects... 
+ +3 random elements from the ``Series``: + +>>> s.sample(n=3) +27 -0.994689 +55 -1.049016 +67 -0.224565 +dtype: float64 + +And a random 10%% of the ``DataFrame`` with replacement: + +>>> df.sample(frac=0.1, replace=True) + A B C D +35 1.981780 0.142106 1.817165 -0.290805 +49 -1.336199 -0.448634 -0.789640 0.217116 +40 0.823173 -0.078816 1.009536 1.015108 +15 1.421154 -0.055301 -1.922594 -0.019696 +6 -0.148339 0.832938 1.787600 -1.383767 +""") + + @Appender(_shared_docs['sample'] % dict( + weights=weightby._shared_docs['weights'], + **_shared_doc_kwargs)) + def sample(self, n=None, frac=None, replace=False, weights=None, + random_state=None, axis=None, **kwargs): if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) - axis_length = self.shape[axis] # Process random_state argument rs = com._random_state(random_state) - # Check weights for compliance if weights is not None: + self, weights = weightby.weightby(self, weights=weights, axis=axis) - # If a series, align with frame - if isinstance(weights, pd.Series): - weights = weights.reindex(self.axes[axis]) - - # Strings acceptable if a dataframe and axis = 0 - if isinstance(weights, string_types): - if isinstance(self, pd.DataFrame): - if axis == 0: - try: - weights = self[weights] - except KeyError: - raise KeyError("String passed to weights not a " - "valid column") - else: - raise ValueError("Strings can only be passed to " - "weights when sampling from rows on " - "a DataFrame") - else: - raise ValueError("Strings cannot be passed as weights " - "when sampling from a Series or Panel.") - - weights = pd.Series(weights, dtype='float64') - - if len(weights) != axis_length: - raise ValueError("Weights and axis to be sampled must be of " - "same length") - - if (weights == np.inf).any() or (weights == -np.inf).any(): - raise ValueError("weight vector may not include `inf` values") - - if (weights < 0).any(): - raise ValueError("weight vector many not include negative " - "values") - - # If 
has nan, set to zero. - weights = weights.fillna(0) - - # Renormalize if don't sum to 1 - if weights.sum() != 1: - if weights.sum() != 0: - weights = weights / weights.sum() - else: - raise ValueError("Invalid weights: weights sum to zero") - - weights = weights.values + axis_length = self.shape[axis] # If no frac or n, default to n=1. if n is None and frac is None: @@ -5567,14 +5514,20 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, np.putmask(rs.values, mask, np.nan) return rs - def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs): + def _agg_by_level(self, name, axis=0, level=0, skipna=True, + weights=None, **kwargs): grouped = self.groupby(level=level, axis=axis) if hasattr(grouped, name) and skipna: return getattr(grouped, name)(**kwargs) axis = self._get_axis_number(axis) method = getattr(type(self), name) - applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs) - return grouped.aggregate(applyf) + + def f(x): + if weights is not None: + kwargs['weights'] = weights + return method(x, axis=axis, skipna=skipna, **kwargs) + + return grouped.aggregate(f) @classmethod def _add_numeric_operations(cls): @@ -5659,19 +5612,19 @@ def compound(self, axis=None, skipna=None, level=None): lambda y, axis: np.maximum.accumulate(y, axis), "max", -np.inf, np.nan) - cls.sum = _make_stat_function( + cls.sum = _make_stat_function_weighted( cls, 'sum', name, name2, axis_descr, 'Return the sum of the values for the requested axis', nanops.nansum) - cls.mean = _make_stat_function( + cls.mean = _make_stat_function_weighted( cls, 'mean', name, name2, axis_descr, 'Return the mean of the values for the requested axis', nanops.nanmean) - cls.skew = _make_stat_function( + cls.skew = _make_stat_function_weighted( cls, 'skew', name, name2, axis_descr, 'Return unbiased skew over requested axis\nNormalized by N-1', nanops.nanskew) - cls.kurt = _make_stat_function( + cls.kurt = _make_stat_function_weighted( cls, 'kurt', name, name2, 
axis_descr, "Return unbiased kurtosis over requested axis using Fisher's " "definition of\nkurtosis (kurtosis of normal == 0.0). Normalized " @@ -5790,6 +5743,28 @@ def _doc_parms(cls): ------- %(outname)s : %(name1)s or %(name2)s (if level specified)\n""" +_num_weighted_doc = """ + +%(desc)s + +Parameters +---------- +axis : %(axis_descr)s +skipna : boolean, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA +level : int or level name, default None + If the axis is a MultiIndex (hierarchical), count along a + particular level, collapsing into a %(name1)s +%(weights)s +numeric_only : boolean, default None + Include only float, int, boolean columns. If None, will attempt to use + everything, then use only numeric data. Not implemented for Series. + +Returns +------- +%(outname)s : %(name1)s or %(name2)s (if level specified)\n""" + _num_ddof_doc = """ %(desc)s @@ -5805,6 +5780,7 @@ def _doc_parms(cls): particular level, collapsing into a %(name1)s ddof : int, default 1 degrees of freedom +%(weights)s numeric_only : boolean, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. 
@@ -5876,12 +5852,34 @@ def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, return set_function_name(stat_func, name, cls) +def _make_stat_function_weighted(cls, name, name1, name2, axis_descr, desc, f): + @Substitution(outname=name, desc=desc, name1=name1, name2=name2, + axis_descr=axis_descr, + weights=weightby._shared_docs['weights']) + @Appender(_num_weighted_doc) + def stat_func(self, axis=None, skipna=None, level=None, weights=None, + numeric_only=None, **kwargs): + nv.validate_stat_func(tuple(), kwargs, fname=name) + if skipna is None: + skipna = True + if axis is None: + axis = self._stat_axis_number + if level is not None: + return self._agg_by_level(name, axis=axis, level=level, + skipna=skipna, weights=weights) + return self._reduce(f, name, axis=axis, skipna=skipna, + weights=weights, numeric_only=numeric_only) + + return set_function_name(stat_func, name, cls) + + def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, - axis_descr=axis_descr) + axis_descr=axis_descr, + weights=weightby._shared_docs['weights']) @Appender(_num_ddof_doc) def stat_func(self, axis=None, skipna=None, level=None, ddof=1, - numeric_only=None, **kwargs): + weights=None, numeric_only=None, **kwargs): nv.validate_stat_ddof_func(tuple(), kwargs, fname=name) if skipna is None: skipna = True @@ -5889,9 +5887,10 @@ def stat_func(self, axis=None, skipna=None, level=None, ddof=1, axis = self._stat_axis_number if level is not None: return self._agg_by_level(name, axis=axis, level=level, - skipna=skipna, ddof=ddof) + skipna=skipna, weights=weights, + ddof=ddof) return self._reduce(f, name, axis=axis, numeric_only=numeric_only, - skipna=skipna, ddof=ddof) + weights=weights, skipna=skipna, ddof=ddof) return set_function_name(stat_func, name, cls) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 7eba32b4932d0..362263a8b7ce9 100644 --- a/pandas/core/groupby.py +++ 
b/pandas/core/groupby.py @@ -49,6 +49,7 @@ from pandas.formats.printing import pprint_thing from pandas.util.validators import validate_kwargs +from pandas.tools import weightby import pandas.core.algorithms as algos import pandas.core.common as com from pandas.core.config import option_context @@ -340,10 +341,14 @@ class _GroupBy(PandasObject, SelectionMixin): def __init__(self, obj, keys=None, axis=0, level=None, grouper=None, exclusions=None, selection=None, as_index=True, - sort=True, group_keys=True, squeeze=False, **kwargs): + sort=True, group_keys=True, squeeze=False, ref_obj=None, **kwargs): self._selection = selection + if ref_obj is None: + ref_obj = obj + self.ref_obj = ref_obj + if isinstance(obj, NDFrame): obj._consolidate_inplace() @@ -791,15 +796,23 @@ def _cython_transform(self, how, numeric_only=True): return self._wrap_transformed_output(output, names) - def _cython_agg_general(self, how, numeric_only=True): + def _cython_agg_general(self, how, weights=None, numeric_only=True): + if weights is not None: + + # TODO, need to integrate this with the exclusions + _, weights = weightby.weightby(self.ref_obj, + weights=weights, + axis=self.axis) + output = {} for name, obj in self._iterate_slices(): is_numeric = is_numeric_dtype(obj.dtype) if numeric_only and not is_numeric: continue + values = weightby.weight(obj.values, weights) try: - result, names = self.grouper.aggregate(obj.values, how) + result, names = self.grouper.aggregate(values, how) except AssertionError as e: raise GroupByError(str(e)) output[name] = self._try_cast(result, obj) @@ -1006,6 +1019,26 @@ def count(self): # defined here for API doc raise NotImplementedError + @Substitution(name='groupby') + @Appender(_doc_template) + def sum(self, *args, **kwargs): + """ + Compute sum of groups, excluding missing values + + For multiple groupings, the result index will be a MultiIndex + """ + + # TODO: this is slightly different from other cythonized functions (e.g. 
mean) + # to accomodate np.sum functionaility + nv.validate_groupby_func('sum', args, kwargs, ('weights', 'numeric_only')) + self._set_group_selection() + try: + return self._cython_agg_general('add', **kwargs) + except AssertionError as e: + raise SpecificationError(str(e)) + except Exception: # pragma: no cover + return self.aggregate(lambda x: np.sum(x, axis=self.axis)) + @Substitution(name='groupby') @Appender(_doc_template) def mean(self, *args, **kwargs): @@ -1014,14 +1047,15 @@ def mean(self, *args, **kwargs): For multiple groupings, the result index will be a MultiIndex """ - nv.validate_groupby_func('mean', args, kwargs) + nv.validate_groupby_func('mean', args, kwargs, ('weights', 'numeric_only')) try: - return self._cython_agg_general('mean') + return self._cython_agg_general('mean', **kwargs) except GroupByError: raise except Exception: # pragma: no cover self._set_group_selection() - f = lambda x: x.mean(axis=self.axis) + kwargs['axis'] = self.axis + f = lambda x: x.mean(**kwargs) return self._python_agg_general(f) @Substitution(name='groupby') @@ -1107,7 +1141,6 @@ def size(self): """Compute group sizes""" return self.grouper.size() - sum = _groupby_function('sum', 'add', np.sum) prod = _groupby_function('prod', 'prod', np.prod) min = _groupby_function('min', 'min', np.min, numeric_only=False) max = _groupby_function('max', 'max', np.max, numeric_only=False) @@ -3134,9 +3167,9 @@ def _iterate_slices(self): continue yield val, slicer(val) - def _cython_agg_general(self, how, numeric_only=True): + def _cython_agg_general(self, how, **kwargs): new_items, new_blocks = self._cython_agg_blocks( - how, numeric_only=numeric_only) + how, **kwargs) return self._wrap_agged_blocks(new_items, new_blocks) def _wrap_agged_blocks(self, items, blocks): @@ -3162,9 +3195,17 @@ def _wrap_agged_blocks(self, items, blocks): _block_agg_axis = 0 - def _cython_agg_blocks(self, how, numeric_only=True): + def _cython_agg_blocks(self, how, weights=None, numeric_only=True, + 
**kwargs): data, agg_axis = self._get_data_to_aggregate() + if weights is not None: + + # TODO, need to integrate this with the exclusions + _, weights = weightby.weightby(self.ref_obj, + weights=weights, + axis=self.axis) + new_blocks = [] if numeric_only: @@ -3172,8 +3213,9 @@ def _cython_agg_blocks(self, how, numeric_only=True): for block in data.blocks: + values = weightby.weight(block.values, weights) result, _ = self.grouper.aggregate( - block.values, how, axis=agg_axis) + values, how, axis=agg_axis) # see if we can cast the block back to the original dtype result = block._try_coerce_and_cast_result(result) @@ -3730,19 +3772,20 @@ def _gotitem(self, key, ndim, subset=None): subset : object, default None subset to act on """ - if ndim == 2: if subset is None: subset = self.obj return DataFrameGroupBy(subset, self.grouper, selection=key, grouper=self.grouper, exclusions=self.exclusions, - as_index=self.as_index) + as_index=self.as_index, + ref_obj=self.obj) elif ndim == 1: if subset is None: subset = self.obj[key] return SeriesGroupBy(subset, selection=key, - grouper=self.grouper) + grouper=self.grouper, + ref_obj=self.obj) raise AssertionError("invalid ndim for _gotitem") diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 1f76bc850cee9..d57cfbc9dd44f 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -23,7 +23,7 @@ from pandas.types.missing import isnull, notnull from pandas.core.common import _values_from_object - +from pandas.tools import weightby class disallow(object): def __init__(self, *dtypes): @@ -71,11 +71,14 @@ def __call__(self, alt): bn_func = None @functools.wraps(alt) - def f(values, axis=None, skipna=True, **kwds): + def f(values, axis=None, skipna=True, weights=None, **kwds): if len(self.kwargs) > 0: for k, v in compat.iteritems(self.kwargs): if k not in kwds: kwds[k] = v + + if weights is not None: + kwds['weights'] = weights try: if self.zero_value is not None and values.size == 0: if values.ndim == 1: @@ -91,7 
+94,7 @@ def f(values, axis=None, skipna=True, **kwds): result.fill(0) return result - if (_USE_BOTTLENECK and skipna and + if (_USE_BOTTLENECK and skipna and weights is None and _bn_ok_dtype(values.dtype, bn_name)): result = bn_func(values, axis=axis, **kwds) @@ -101,7 +104,8 @@ def f(values, axis=None, skipna=True, **kwds): result = alt(values, axis=axis, skipna=skipna, **kwds) else: result = alt(values, axis=axis, skipna=skipna, **kwds) - except Exception: + except Exception as e: + try: result = alt(values, axis=axis, skipna=skipna, **kwds) except ValueError as e: @@ -169,11 +173,29 @@ def _get_fill_value(dtype, fill_value=None, fill_value_typ=None): return tslib.iNaT -def _get_values(values, skipna, fill_value=None, fill_value_typ=None, - isfinite=False, copy=True): - """ utility to get the values view, mask, dtype +def _get_values(values, skipna, + fill_value=None, fill_value_typ=None, + isfinite=False, weights=None, axis=None, + copy=True): + """ + utility to get the values view, mask, dtype if necessary copy and mask using the specified fill_value - copy = True will force the copy + and adjust for weights + + Parameters + ---------- + values : ndarray + skipna : boolean + fill_value : value, default None + value to fillna + fill_value_typ : value, default None + dtype of the fillvalue + isfinite : boolean, default False + weights : ndarray, optional + normalized ndarray, same length as the axis + axis : axis to broadcast, default None + copy : boolean, default True + True will force the copy """ values = _values_from_object(values) if isfinite: @@ -181,6 +203,8 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, else: mask = isnull(values) + # weights + values = weightby.weight(values, weights) dtype = values.dtype dtype_ok = _na_ok_dtype(dtype) @@ -267,13 +291,16 @@ def nanall(values, axis=None, skipna=True): @disallow('M8') @bottleneck_switch(zero_value=0) -def nansum(values, axis=None, skipna=True): - values, mask, dtype, dtype_max = 
_get_values(values, skipna, 0) +def nansum(values, axis=None, skipna=True, weights=None): + values, mask, dtype, dtype_max = _get_values(values, skipna, + 0, weights=weights, + axis=axis) dtype_sum = dtype_max if is_float_dtype(dtype): dtype_sum = dtype elif is_timedelta64_dtype(dtype): dtype_sum = np.float64 + the_sum = values.sum(axis, dtype=dtype_sum) the_sum = _maybe_null_out(the_sum, axis, mask) @@ -282,8 +309,10 @@ def nansum(values, axis=None, skipna=True): @disallow('M8') @bottleneck_switch() -def nanmean(values, axis=None, skipna=True): - values, mask, dtype, dtype_max = _get_values(values, skipna, 0) +def nanmean(values, axis=None, skipna=True, weights=None): + values, mask, dtype, dtype_max = _get_values(values, skipna, + 0, weights=weights, + axis=axis) dtype_sum = dtype_max dtype_count = np.float64 @@ -368,14 +397,14 @@ def _get_counts_nanvar(mask, axis, ddof, dtype=float): @disallow('M8') @bottleneck_switch(ddof=1) -def nanstd(values, axis=None, skipna=True, ddof=1): +def nanstd(values, axis=None, skipna=True, ddof=1, weights=None): result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof)) return _wrap_results(result, values.dtype) @disallow('M8') @bottleneck_switch(ddof=1) -def nanvar(values, axis=None, skipna=True, ddof=1): +def nanvar(values, axis=None, skipna=True, ddof=1, weights=None): dtype = values.dtype mask = isnull(values) @@ -414,7 +443,7 @@ def nanvar(values, axis=None, skipna=True, ddof=1): @disallow('M8', 'm8') -def nansem(values, axis=None, skipna=True, ddof=1): +def nansem(values, axis=None, skipna=True, ddof=1, weights=None): var = nanvar(values, axis, skipna, ddof=ddof) mask = isnull(values) @@ -476,7 +505,7 @@ def nanargmin(values, axis=None, skipna=True): @disallow('M8', 'm8') -def nanskew(values, axis=None, skipna=True): +def nanskew(values, axis=None, skipna=True, weights=None): """ Compute the sample skewness. 
The statistic computed here is the adjusted Fisher-Pearson standardized @@ -531,7 +560,7 @@ def nanskew(values, axis=None, skipna=True): @disallow('M8', 'm8') -def nankurt(values, axis=None, skipna=True): +def nankurt(values, axis=None, skipna=True, weights=None): """ Compute the sample skewness. The statistic computed here is the adjusted Fisher-Pearson standardized diff --git a/pandas/core/panel.py b/pandas/core/panel.py index f708774dd84ff..8685b51083859 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1101,10 +1101,13 @@ def _apply_2d(self, func, axis): return self._construct_return_type(dict(results)) def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + weights=None, filter_type=None, **kwds): if numeric_only: raise NotImplementedError('Panel.{0} does not implement ' 'numeric_only.'.format(name)) + if weights is not None: + raise NotImplementedError('Panel.{0} does not implement ' + 'weights.'.format(name)) axis_name = self._get_axis_name(axis) axis_number = self._get_axis_number(axis_name) diff --git a/pandas/core/series.py b/pandas/core/series.py index f656d72296e3a..2075270f79f61 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2299,8 +2299,8 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): return self._constructor(mapped, index=self.index).__finalize__(self) - def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + def _reduce(self, op, name, axis=0, skipna=True, weights=None, + numeric_only=None, filter_type=None, **kwds): """ perform a reduction operation @@ -2308,6 +2308,11 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, otherwise delegate to the object """ + if weights is not None: + from pandas.tools import weightby + _, weights = weightby.weightby(self, weights=weights, axis=axis) + kwds['weights'] = weights + delegate = self._values if isinstance(delegate, np.ndarray): # Validate that 'axis' 
is consistent with Series's single axis. @@ -2315,6 +2320,7 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, if numeric_only: raise NotImplementedError('Series.{0} does not implement ' 'numeric_only.'.format(name)) + with np.errstate(all='ignore'): return op(delegate, skipna=skipna, **kwds) diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index d6bc892921c42..d2842ae80088a 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -325,7 +325,7 @@ def __array_finalize__(self, obj): self.fill_value = getattr(obj, 'fill_value', None) def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + weights=None, filter_type=None, **kwds): """ perform a reduction operation """ return op(self.get_values(), skipna=skipna, **kwds) diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 3500ce913462a..ba8f5bbfbaf9f 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -449,60 +449,6 @@ def test_sample(self): self.assertTrue(len(o.sample(frac=0.34) == 3)) self.assertTrue(len(o.sample(frac=0.36) == 4)) - ### - # Check weights - ### - - # Weight length must be right - with tm.assertRaises(ValueError): - o.sample(n=3, weights=[0, 1]) - - with tm.assertRaises(ValueError): - bad_weights = [0.5] * 11 - o.sample(n=3, weights=bad_weights) - - with tm.assertRaises(ValueError): - bad_weight_series = Series([0, 0, 0.2]) - o.sample(n=4, weights=bad_weight_series) - - # Check won't accept negative weights - with tm.assertRaises(ValueError): - bad_weights = [-0.1] * 10 - o.sample(n=3, weights=bad_weights) - - # Check inf and -inf throw errors: - with tm.assertRaises(ValueError): - weights_with_inf = [0.1] * 10 - weights_with_inf[0] = np.inf - o.sample(n=3, weights=weights_with_inf) - - with tm.assertRaises(ValueError): - weights_with_ninf = [0.1] * 10 - weights_with_ninf[0] = -np.inf - o.sample(n=3, weights=weights_with_ninf) - - # All zeros raises errors - 
zero_weights = [0] * 10 - with tm.assertRaises(ValueError): - o.sample(n=3, weights=zero_weights) - - # All missing weights - nan_weights = [np.nan] * 10 - with tm.assertRaises(ValueError): - o.sample(n=3, weights=nan_weights) - - # Check np.nan are replaced by zeros. - weights_with_nan = [np.nan] * 10 - weights_with_nan[5] = 0.5 - self._compare( - o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6]) - - # Check None are also replaced by zeros. - weights_with_None = [None] * 10 - weights_with_None[5] = 0.5 - self._compare( - o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6]) - def test_size_compat(self): # GH8846 # size property should be defined @@ -1579,123 +1525,6 @@ def tester(self): class TestNDFrame(tm.TestCase): # tests that don't fit elsewhere - def test_sample(sel): - # Fixes issue: 2419 - # additional specific object based tests - - # A few dataframe test with degenerate weights. - easy_weight_list = [0] * 10 - easy_weight_list[5] = 1 - - df = pd.DataFrame({'col1': range(10, 20), - 'col2': range(20, 30), - 'colString': ['a'] * 10, - 'easyweights': easy_weight_list}) - sample1 = df.sample(n=1, weights='easyweights') - assert_frame_equal(sample1, df.iloc[5:6]) - - # Ensure proper error if string given as weight for Series, panel, or - # DataFrame with axis = 1. - s = Series(range(10)) - with tm.assertRaises(ValueError): - s.sample(n=3, weights='weight_column') - - panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4], - minor_axis=[3, 4, 5]) - with tm.assertRaises(ValueError): - panel.sample(n=1, weights='weight_column') - - with tm.assertRaises(ValueError): - df.sample(n=1, weights='weight_column', axis=1) - - # Check weighting key error - with tm.assertRaises(KeyError): - df.sample(n=3, weights='not_a_real_column_name') - - # Check that re-normalizes weights that don't sum to one. 
- weights_less_than_1 = [0] * 10 - weights_less_than_1[0] = 0.5 - tm.assert_frame_equal( - df.sample(n=1, weights=weights_less_than_1), df.iloc[:1]) - - ### - # Test axis argument - ### - - # Test axis argument - df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10}) - second_column_weight = [0, 1] - assert_frame_equal( - df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']]) - - # Different axis arg types - assert_frame_equal(df.sample(n=1, axis='columns', - weights=second_column_weight), - df[['col2']]) - - weight = [0] * 10 - weight[5] = 0.5 - assert_frame_equal(df.sample(n=1, axis='rows', weights=weight), - df.iloc[5:6]) - assert_frame_equal(df.sample(n=1, axis='index', weights=weight), - df.iloc[5:6]) - - # Check out of range axis values - with tm.assertRaises(ValueError): - df.sample(n=1, axis=2) - - with tm.assertRaises(ValueError): - df.sample(n=1, axis='not_a_name') - - with tm.assertRaises(ValueError): - s = pd.Series(range(10)) - s.sample(n=1, axis=1) - - # Test weight length compared to correct axis - with tm.assertRaises(ValueError): - df.sample(n=1, axis=1, weights=[0.5] * 10) - - # Check weights with axis = 1 - easy_weight_list = [0] * 3 - easy_weight_list[2] = 1 - - df = pd.DataFrame({'col1': range(10, 20), - 'col2': range(20, 30), - 'colString': ['a'] * 10}) - sample1 = df.sample(n=1, axis=1, weights=easy_weight_list) - assert_frame_equal(sample1, df[['colString']]) - - # Test default axes - p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6], - minor_axis=[1, 3, 5]) - assert_panel_equal( - p.sample(n=3, random_state=42), p.sample(n=3, axis=1, - random_state=42)) - assert_frame_equal( - df.sample(n=3, random_state=42), df.sample(n=3, axis=0, - random_state=42)) - - # Test that function aligns weights with frame - df = DataFrame( - {'col1': [5, 6, 7], - 'col2': ['a', 'b', 'c'], }, index=[9, 5, 3]) - s = Series([1, 0, 0], index=[3, 5, 9]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s)) - - # Weights have index values 
to be dropped because not in - # sampled DataFrame - s2 = Series([0.001, 0, 10000], index=[3, 5, 10]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2)) - - # Weights have empty values to be filed with zeros - s3 = Series([0.01, 0], index=[3, 5]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3)) - - # No overlap in weight and sampled DataFrame indices - s4 = Series([1, 0], index=[1, 2]) - with tm.assertRaises(ValueError): - df.sample(1, weights=s4) - def test_squeeze(self): # noop for s in [tm.makeFloatSeries(), tm.makeStringSeries(), diff --git a/pandas/tools/tests/test_weightby.py b/pandas/tools/tests/test_weightby.py new file mode 100644 index 0000000000000..34aa6399e2cbc --- /dev/null +++ b/pandas/tools/tests/test_weightby.py @@ -0,0 +1,246 @@ +import numpy as np +import pandas as pd + +from pandas import DataFrame, Series +from pandas.util import testing as tm +from pandas.core import common as com + + +class TestWeightsby(tm.TestCase): + + def setUp(self): + self.df = DataFrame({'A': [0.25, 0.25, 0.25, 0.25], + 'B': [1, 2, 3, 4]}) + self.df2 = DataFrame({'A': [1, 2, 3, 4], + 'B': [1, 2, 3, 4]}) + self.df3 = DataFrame({'A': [1, 2, 3, 4], + 'B': [1, 2, 3, 4], + 'C': [1, 1, 2, 2]}) + + @property + def rs(self): + # always return the same starting random state object + return com._random_state(1234) + + def test_basic(self): + + for f in ['sum', 'mean']: + weights = (self.df[['A']] / self.df.A.sum()).values + result = getattr(self.df, f)(weights='A') + expected = getattr(self.df[['B']] * weights, f)() + tm.assert_series_equal(result, expected) + + weights2 = (self.df2[['A']] / self.df2.A.sum()).values + result = getattr(self.df2, f)(weights='A') + expected = getattr(self.df2[['B']] * weights2, f)() + tm.assert_series_equal(result, expected) + + for f in ['kurt', 'skew', 'sem']: + weights = (self.df[['A']] / self.df.A.sum()).values + result = getattr(self.df, f)(weights='A') + expected = getattr(self.df[['B']] * weights, f)() + # 
tm.assert_series_equal(result, expected) + + weights2 = (self.df2[['A']] / self.df2.A.sum()).values + result = getattr(self.df2, f)(weights='A') + expected = getattr(self.df2[['B']] * weights2, f)() + # tm.assert_series_equal(result, expected) + + for f in ['std', 'var']: + + weights = (self.df[['A']] / self.df.A.sum()).values + result = getattr(self.df, f)(weights='A', ddof=2) + expected = getattr(self.df[['B']] * weights, f)(ddof=2) + # tm.assert_series_equal(result, expected) + + weights2 = (self.df2[['A']] / self.df2.A.sum()).values + result = getattr(self.df2, f)(weights='A', ddof=2) + expected = getattr(self.df2[['B']] * weights2, f)(ddof=2) + # tm.assert_series_equal(result, expected) + + def test_groupby(self): + + for f in ['mean', 'sum']: + + weights = (self.df3['A'] / self.df3.A.sum()).values + result = getattr(self.df3.groupby('C'), f)(weights='A') + adj = self.df3.assign(A=self.df3.A * weights, + B=self.df3.B * weights) + expected = getattr(adj.groupby('C'), f)() + tm.assert_frame_equal(result, expected) + + weights = (self.df3['A'] / self.df3.A.sum()).values + result = getattr(self.df3.groupby('C').B, f)(weights='A') + adj = self.df3.assign(B=self.df3.B * weights) + expected = getattr(adj.groupby('C').B, f)() + tm.assert_series_equal(result, expected) + + def test_unsupported(self): + for f in ['first', 'median', 'min', 'max', 'prod']: + + def func(): + getattr(self.df, f)(weights='A') + self.assertRaises(TypeError, func) + + def test_panel_unsupported(self): + panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4], + minor_axis=[3, 4, 5]) + with tm.assertRaises(NotImplementedError): + panel.sum(weights='weight_column') + + def test_weights_validation(self): + o = DataFrame(np.random.randn(10, 10)) + + # Weight length must be right + with tm.assertRaises(ValueError): + o.sample(n=3, random_state=self.rs, weights=[0, 1]) + + with tm.assertRaises(ValueError): + bad_weights = [0.5] * 11 + o.sample(n=3, random_state=self.rs, weights=bad_weights) + + # 
Check won't accept negative weights + with tm.assertRaises(ValueError): + bad_weights = [-0.1] * 10 + o.sample(n=3, random_state=self.rs, weights=bad_weights) + + # Check inf and -inf throw errors: + with tm.assertRaises(ValueError): + weights_with_inf = [0.1] * 10 + weights_with_inf[0] = np.inf + o.sample(n=3, random_state=self.rs, weights=weights_with_inf) + + with tm.assertRaises(ValueError): + weights_with_ninf = [0.1] * 10 + weights_with_ninf[0] = -np.inf + o.sample(n=3, random_state=self.rs, weights=weights_with_ninf) + + # All zeros raises errors + zero_weights = [0] * 10 + with tm.assertRaises(ValueError): + o.sample(n=3, random_state=self.rs, weights=zero_weights) + + # All missing weights + nan_weights = [np.nan] * 10 + with tm.assertRaises(ValueError): + o.sample(n=3, random_state=self.rs, weights=nan_weights) + + # Check np.nan are replaced by zeros. + weights_with_nan = [np.nan] * 10 + weights_with_nan[5] = 0.5 + result = o.sample(n=1, random_state=self.rs, weights=weights_with_nan) + expected = o.iloc[5:6] + tm.assert_frame_equal(result, expected) + + # Check None are also replaced by zeros. + weights_with_None = [None] * 10 + weights_with_None[5] = 0.5 + result = o.sample(n=1, random_state=self.rs, weights=weights_with_None) + expected = o.iloc[5:6] + tm.assert_frame_equal(result, expected) + + def test_weights_strings(self): + # Fixes issue: 2419 + # additional specific object based tests + + # A few dataframe test with degenerate weights. + easy_weight_list = [0] * 10 + easy_weight_list[5] = 1 + + df = pd.DataFrame({'col1': range(10, 20), + 'col2': range(20, 30), + 'colString': ['a'] * 10, + 'easyweights': easy_weight_list}) + result = df.sample(n=1, random_state=self.rs, weights='easyweights') + expected = df[['col1', 'col2', 'colString']].iloc[5:6] + tm.assert_frame_equal(result, expected) + + # Ensure proper error if string given as weight for Series, panel, or + # DataFrame with axis = 1. 
+ s = Series(range(10)) + with tm.assertRaises(ValueError): + s.sample(n=3, random_state=self.rs, weights='weight_column') + + with tm.assertRaises(ValueError): + df.sample(n=1, random_state=self.rs, + weights='weight_column', axis=1) + + # Check weighting key error + with tm.assertRaises(KeyError): + df.sample(n=3, random_state=self.rs, + weights='not_a_real_column_name') + + # Check that re-normalizes weights that don't sum to one. + weights_less_than_1 = [0] * 10 + weights_less_than_1[0] = 0.5 + result = df.sample(n=1, random_state=self.rs, + weights=weights_less_than_1) + expected = df.iloc[[0]] + tm.assert_frame_equal(result, expected) + + def test_weights_axis(self): + + # Test axis argument + df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10}) + second_column_weight = [0, 1] + result = df.sample(n=1, random_state=self.rs, + weights=second_column_weight, axis=1) + tm.assert_frame_equal(result, df[['col2']]) + + # Different axis arg types + result = df.sample(n=1, random_state=self.rs, + weights=second_column_weight, axis='columns') + tm.assert_frame_equal(result, df[['col2']]) + + weight = [0] * 10 + weight[5] = 0.5 + result = df.sample(n=1, random_state=self.rs, + weights=weight, axis='index') + expected = df.iloc[5:6] + tm.assert_frame_equal(result, expected) + + # Test weight length compared to correct axis + with tm.assertRaises(ValueError): + df.sample(n=1, random_state=self.rs, weights=[0.5] * 10, axis=1) + + # Check weights with axis = 1 + easy_weight_list = [0] * 3 + easy_weight_list[2] = 1 + + df = pd.DataFrame({'col1': range(10, 20), + 'col2': range(20, 30), + 'colString': ['a'] * 10}) + result = df.sample(n=1, random_state=self.rs, + weights=easy_weight_list, axis=1) + expected = df[['colString']] + tm.assert_frame_equal(result, expected) + + # Test that function aligns weights with frame + df = DataFrame( + {'col1': [5, 6, 7], + 'col2': ['a', 'b', 'c'], }, index=[9, 5, 3]) + s = Series([1, 0, 0], index=[3, 5, 9]) + result = df.sample(1, 
random_state=self.rs, weights=s) + tm.assert_frame_equal(result, df.loc[[3]]) + + # Weights have index values to be dropped because not in + # sampled DataFrame + s2 = Series([0.001, 0, 10000], index=[3, 5, 10]) + result = df.sample(1, random_state=self.rs, weights=s2) + tm.assert_frame_equal(result, df.loc[[3]]) + + # Weights have empty values to be filed with zeros + s3 = Series([0.01, 0], index=[3, 5]) + result = df.sample(1, random_state=self.rs, weights=s3) + tm.assert_frame_equal(result, df.loc[[3]]) + + # No overlap in weight and sampled DataFrame indices + s4 = Series([1, 0], index=[1, 2]) + with tm.assertRaises(ValueError): + df.sample(1, random_state=self.rs, weights=s4) + + +if __name__ == '__main__': + import nose + nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], + exit=False) diff --git a/pandas/tools/weightby.py b/pandas/tools/weightby.py new file mode 100644 index 0000000000000..3022d2af0ab15 --- /dev/null +++ b/pandas/tools/weightby.py @@ -0,0 +1,123 @@ +""" +functions to compute weighting Indexes +""" + +import numpy as np +from pandas.types.generic import ABCSeries, ABCDataFrame +from pandas.compat import string_types +from pandas.util.decorators import Substitution + +_shared_docs = {} +_shared_docs['weights'] = """weights : str or ndarray-like, optional + Default 'None' results in equal probability weighting. + + If passed a Series, will align with target object on index. + Index values in weights not found in the target object + will be ignored and index values in the target object + not in weights will be assigned weights of zero. + + If called on a DataFrame, will accept the name of a column + when axis = 0. + + Unless weights are a Series, weights must be same length + as axis of the target object. + + If weights do not sum to 1, they will be normalized to sum to 1. + + Missing values in the weights column will be treated as zero. 
+ inf and -inf values not allowed.""" + + +@Substitution(weights=_shared_docs['weights']) +def weightby(obj, weights=None, axis=0): + """returns a weights Series for the specified weights + +Paramaters +---------- +obj : Series/DataFrame +%(weights)s +axis : {0 (index), 1 (columns)} + axis to compute weights on obj + +Returns +------- +tuple of (obj, ndarray of weights, like indexed to obj)""" + + # If a series, align with frame + if isinstance(weights, ABCSeries): + weights = weights.reindex(obj.axes[axis]) + + # Strings acceptable if a dataframe and axis = 0 + if isinstance(weights, string_types): + + # we use self.obj as we may have a selection here + if isinstance(obj, ABCDataFrame): + if axis == 0: + try: + w, weights = weights, obj[weights] + + # remove the weights column from obj + obj = obj.drop([w], axis=1) + except KeyError: + raise KeyError("String passed to weights is not a " + "valid column") + else: + raise ValueError("Strings can only be passed to " + "weights when weighting by the rows on " + "a DataFrame") + else: + raise ValueError("Strings cannot be passed as weights " + "when weighting from a Series or Panel.") + + from pandas import Series + weights = Series(weights, dtype='float64') + + if len(weights) != len(obj.axes[axis]): + raise ValueError("Weights and axis to be must be of " + "same length") + + if (weights == np.inf).any() or (weights == -np.inf).any(): + raise ValueError("weight vector may not include `inf` values") + + if (weights < 0).any(): + raise ValueError("weight vector many not include negative " + "values") + + # If has nan, set to zero. 
+ weights = weights.fillna(0) + + # Renormalize if don't sum to 1 + if weights.sum() != 1: + if weights.sum() != 0: + weights = weights / weights.sum() + else: + raise ValueError("Invalid weights: weights sum to zero") + + return obj, weights.values + + +def weight(values, weights): + """ + Return the values * weights, broadcasting if needed + + Parameters + ---------- + values : ndarray + weights : 1d-ndarray + + Returns + ------- + values shaped ndarray + """ + + if weights is None: + return values + + if values.ndim == 1: + return values * weights + + elif values.ndim == 2: + + return values * weights + + raise NotImplementedError
closes #10030 alt to #15031 ``` In [5]: df = DataFrame({'A': [1, 1, 2, 2], ...: 'B': [1, 2, 3, 4]}) In [6]: df Out[6]: A B 0 1 1 1 1 2 2 2 3 3 2 4 In [7]: df.mean() Out[7]: A 1.5 B 2.5 dtype: float64 In [8]: df.mean(weights='A') Out[8]: A 0.416667 B 0.708333 dtype: float64 ``` New signatures ``` In [9]: Series.mean? Signature: Series.mean(self, axis=None, skipna=None, level=None, weights=None, numeric_only=None, **kwargs) Docstring: Return the mean of the values for the requested axis Parameters ---------- axis : {index (0)} skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a scalar weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in the target object will be ignored and index values in the target object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis of the target object. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. inf and -inf values not allowed. numeric_only : boolean, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- mean : scalar or Series (if level specified) File: ~/pandas/pandas/core/generic.py Type: function ```
https://api.github.com/repos/pandas-dev/pandas/pulls/15039
2017-01-02T18:36:13Z
2017-02-27T16:03:21Z
null
2019-12-11T19:04:18Z
[WIP]: Add quantile shortcut
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 7eba32b4932d0..7f50434354ee0 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1024,6 +1024,22 @@ def mean(self, *args, **kwargs): f = lambda x: x.mean(axis=self.axis) return self._python_agg_general(f) + @Substitution(name='groupby') + @Appender(_doc_template) + def quantile(self, *args, **kwargs): + """ + Compute quantile of groups, excluding missing values + """ + nv.validate_groupby_func('quantile', args, kwargs) + try: + return self._cython_agg_general('quantile') + except GroupByError: + raise + except Exception: # pragma: no cover + self._set_group_selection() + f = lambda x: x.quantile(axis=self.axis) + return self._python_agg_general(f) + @Substitution(name='groupby') @Appender(_doc_template) def median(self): diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index e6d500144fa44..4332b704c1c1e 100755 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -329,6 +329,23 @@ def aggregate(self, arg, *args, **kwargs): agg = aggregate apply = aggregate + def quantile(self, arg, *args, **kwargs): + """ + quantile method on resample - `resample(..).quantile(..)` + is a short-cut to `resample.agg(lambda x: x.quantile(0.75))` + Parameters + ---------- + arg : lambda function + + Examples + -------- + >>> resampled.quantile(0.75) + + Returns + ------- + """ + return self.agg(lambda x: x.quantile(arg), *args, **kwargs) + def transform(self, arg, *args, **kwargs): """ Call function producing a like-indexed Series on each group and return
- [x] closes #15023 - [ ] tests added / passed - [ ] passes ``git diff upstream/master | flake8 --diff`` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/15032
2017-01-01T19:15:46Z
2017-03-28T18:01:45Z
null
2017-04-28T16:10:39Z
WIP/ENH: Weightby
diff --git a/doc/source/api.rst b/doc/source/api.rst index 272dfe72eafe7..ee05532e0ae3a 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -314,6 +314,8 @@ Function application, GroupBy & Window :toctree: generated/ Series.apply + Series.aggregate + Series.transform Series.map Series.groupby Series.rolling @@ -833,6 +835,8 @@ Function application, GroupBy & Window DataFrame.apply DataFrame.applymap + DataFrame.aggregate + DataFrame.transform DataFrame.groupby DataFrame.rolling DataFrame.expanding diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 2e8abe0a5c329..7d4a776203a2d 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -702,7 +702,8 @@ on an entire ``DataFrame`` or ``Series``, row- or column-wise, or elementwise. 1. `Tablewise Function Application`_: :meth:`~DataFrame.pipe` 2. `Row or Column-wise Function Application`_: :meth:`~DataFrame.apply` -3. Elementwise_ function application: :meth:`~DataFrame.applymap` +3. `Aggregation API`_: :meth:`~DataFrame.agg` and :meth:`~DataFrame.transform` +4. `Applying Elementwise Functions`_: :meth:`~DataFrame.applymap` .. _basics.pipe: @@ -778,6 +779,13 @@ statistics methods, take an optional ``axis`` argument: df.apply(np.cumsum) df.apply(np.exp) +``.apply()`` will also dispatch on a string method name. + +.. ipython:: python + + df.apply('mean') + df.apply('mean', axis=1) + Depending on the return type of the function passed to :meth:`~DataFrame.apply`, the result will either be of lower dimension or the same dimension. @@ -827,16 +835,234 @@ set to True, the passed function will instead receive an ndarray object, which has positive performance implications if you do not need the indexing functionality. -.. seealso:: +.. _basics.aggregate: + +Aggregation API +~~~~~~~~~~~~~~~ + +.. versionadded:: 0.20.0 + +The aggregation API allows one to express possibly multiple aggregation operations in a single concise way. 
+This API is similar across pandas objects, :ref:`groupby aggregates <groupby.aggregate>`, +:ref:`window functions <stats.aggregate>`, and the :ref:`resample API <timeseries.aggregate>`. + +We will use a similar starting frame from above. + +.. ipython:: python + + tsdf = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'], + index=pd.date_range('1/1/2000', periods=10)) + tsdf.iloc[3:7] = np.nan + tsdf + +Using a single function is equivalent to ``.apply``; You can also pass named methods as strings. +This will return a Series of the output. + +.. ipython:: python + + tsdf.agg(np.sum) + + tsdf.agg('sum') + + # these are equivalent to a ``.sum()`` because we are aggregating on a single function + tsdf.sum() + +On a Series this will result in a scalar value + +.. ipython:: python + + tsdf.A.agg('sum') + + +Aggregating multiple functions at once +++++++++++++++++++++++++++++++++++++++ + +You can pass arguments as a list. The results of each of the passed functions will be a row in the resultant DataFrame. +These are naturally named from the aggregation function. + +.. ipython:: python + + tsdf.agg(['sum']) + +Multiple functions yield multiple rows. - The section on :ref:`GroupBy <groupby>` demonstrates related, flexible - functionality for grouping by some criterion, applying, and combining the - results into a Series, DataFrame, etc. +.. ipython:: python + + tsdf.agg(['sum', 'mean']) + +On a Series, multiple functions return a Series, indexed by the function names. + +.. ipython:: python + + tsdf.A.agg(['sum', 'mean']) + + +Aggregating with a dict of functions +++++++++++++++++++++++++++++++++++++ + +Passing a dictionary of column name to function or list of functions, to ``DataFame.agg`` +allows you to customize which functions are applied to which columns. + +.. ipython:: python + + tsdf.agg({'A': 'mean', 'B': 'sum'}) + +Passing a list-like will generate a DataFrame output. 
You will get a matrix-like output +of all of the aggregators; some may be missing values. + +.. ipython:: python + + tsdf.agg({'A': ['mean', 'min'], 'B': 'sum'}) -.. _Elementwise: +For a Series, you can pass a dict. You will get back a MultiIndex Series; The outer level will +be the keys, the inner the name of the functions. + +.. ipython:: python + + tsdf.A.agg({'foo' : ['sum', 'mean']}) + +Alternatively, using multiple dictionaries, you can have renamed elements with the aggregation + +.. ipython:: python + + tsdf.A.agg({'foo' : 'sum', 'bar': 'mean'}) + +Multiple keys will yield a MultiIndex Series. The outer level will be the keys, the inner +the names of the functions. + +.. ipython:: python + + tsdf.A.agg({'foo' : ['sum', 'mean'], 'bar': ['min', 'max', lambda x: x.sum()+1]}) + +.. _basics.aggregation.mixed_dtypes: + +Mixed Dtypes +++++++++++++ + +When presented with mixed dtypes that cannot aggregate, ``.agg`` will only take the valid +aggregations. This is similiar to how groupby ``.agg`` works. + +.. ipython:: python -Applying elementwise Python functions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + mdf = pd.DataFrame({'A': [1, 2, 3], + 'B': [1., 2., 3.], + 'C': ['foo', 'bar', 'baz'], + 'D': pd.date_range('20130101', periods=3)}) + mdf.dtypes + +.. ipython:: python + + mdf.agg(['min', 'sum']) + +.. _basics.aggregation.custom_describe: + +Custom describe ++++++++++++++++ + +With ``.agg()`` is it possible to easily create a custom describe function, similar +to the built in :ref:`describe function <basics.describe>`. + +.. ipython:: python + + from functools import partial + + q_25 = partial(pd.Series.quantile, q=0.25) + q_25.__name__ = '25%' + q_75 = partial(pd.Series.quantile, q=0.75) + q_75.__name__ = '75%' + + tsdf.agg(['count', 'mean', 'std', 'min', q_25, 'median', q_75, 'max']) + +.. _basics.transform: + +Transform API +~~~~~~~~~~~~~ + +.. 
versionadded:: 0.20.0 + +The ``transform`` method returns an object that is indexed the same (same size) +as the original. This API allows you to provide *multiple* operations at the same +time rather than one-by-one. Its api is quite similar to the ``.agg`` API. + +Use a similar frame to the above sections. + +.. ipython:: python + + tsdf = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'], + index=pd.date_range('1/1/2000', periods=10)) + tsdf.iloc[3:7] = np.nan + tsdf + +Transform the entire frame. Transform allows functions to input as a numpy function, string +function name and user defined function. + +.. ipython:: python + + tsdf.transform(np.abs) + tsdf.transform('abs') + tsdf.transform(lambda x: x.abs()) + +Since this is a single function, this is equivalent to a ufunc application + +.. ipython:: python + + np.abs(tsdf) + +Passing a single function to ``.transform()`` with a Series will yield a single Series in return. + +.. ipython:: python + + tsdf.A.transform(np.abs) + + +Transform with multiple functions ++++++++++++++++++++++++++++++++++ + +Passing multiple functions will yield a column multi-indexed DataFrame. +The first level will be the original frame column names; the second level +will be the names of the transforming functions. + +.. ipython:: python + + tsdf.transform([np.abs, lambda x: x+1]) + +Passing multiple functions to a Series will yield a DataFrame. The +resulting column names will be the transforming functions. + +.. ipython:: python + + tsdf.A.transform([np.abs, lambda x: x+1]) + + +Transforming with a dict of functions ++++++++++++++++++++++++++++++++++++++ + + +Passing a dict of functions will will allow selective transforming per column. + +.. ipython:: python + + tsdf.transform({'A': np.abs, 'B': lambda x: x+1}) + +Passing a dict of lists will generate a multi-indexed DataFrame with these +selective transforms. + +.. 
ipython:: python + + tsdf.transform({'A': np.abs, 'B': [lambda x: x+1, 'sqrt']}) + +On a Series, passing a dict allows renaming as in ``.agg()`` + +.. ipython:: python + + tsdf.A.transform({'foo': np.abs}) + tsdf.A.transform({'foo': np.abs, 'bar': [lambda x: x+1, 'sqrt']}) + + +.. _basics.elementwise: + +Applying Elementwise Functions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since not all functions can be vectorized (accept NumPy arrays and return another array or value), the methods :meth:`~DataFrame.applymap` on DataFrame diff --git a/doc/source/computation.rst b/doc/source/computation.rst index a19a56f6f1905..730c10e3393b1 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -565,7 +565,9 @@ Aggregation ----------- Once the ``Rolling``, ``Expanding`` or ``EWM`` objects have been created, several methods are available to -perform multiple computations on the data. This is very similar to a ``.groupby(...).agg`` seen :ref:`here <groupby.aggregate>`. +perform multiple computations on the data. These operations are similar to the :ref:`aggregating API <basics.aggregate>`, +:ref:`groupby aggregates <groupby.aggregate>`, and :ref:`resample API <timeseries.aggregate>`. + .. ipython:: python diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 45af02cb60b25..d72ab7a9b121b 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -439,7 +439,9 @@ Aggregation ----------- Once the GroupBy object has been created, several methods are available to -perform a computation on the grouped data. +perform a computation on the grouped data. These operations are similar to the +:ref:`aggregating API <basics.aggregate>`, :ref:`window functions <stats.aggregate>`, +and :ref:`resample API <timeseries.aggregate>`. 
An obvious one is aggregation via the ``aggregate`` or equivalently ``agg`` method: diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index e09d240ed91b7..5543e36a7153e 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1470,11 +1470,13 @@ We can instead only resample those groups where we have points as follows: ts.groupby(partial(round, freq='3T')).sum() +.. _timeseries.aggregate: + Aggregation ~~~~~~~~~~~ -Similar to :ref:`groupby aggregates <groupby.aggregate>` and the :ref:`window functions <stats.aggregate>`, a ``Resampler`` can be selectively -resampled. +Similar to the :ref:`aggregating API <basics.aggregate>`, :ref:`groupby aggregates <groupby.aggregate>`, and :ref:`window functions <stats.aggregate>`, +a ``Resampler`` can be selectively resampled. Resampling a ``DataFrame``, the default will be to act on all columns with the same function. diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index bbf528a50e1bb..0c8125ca9a802 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -9,6 +9,8 @@ users upgrade to this version. Highlights include: +- new ``.agg()`` API for Series/DataFrame similar to the groupby-rolling-resample API's, see :ref:`here <whatsnew_0200.enhancements.agg>` +- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`. - Building pandas for development now requires ``cython >= 0.23`` (:issue:`14831`) Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations <whatsnew_0200.deprecations>` before updating. @@ -22,9 +24,73 @@ Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations New features ~~~~~~~~~~~~ -- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here <io.feather>`. +.. 
_whatsnew_0200.enhancements.agg: + +``agg`` API +^^^^^^^^^^^ + +Series & DataFrame have been enhanced to support the aggregation API. This is an already familiar API that +is supported for groupby, windows operations, and resampling. This allows one to express, possibly multiple +aggregation operations in a single concise way by using ``.agg()`` and ``.transform()``. The +full documentation is :ref:`here <basics.aggregate>`` (:issue:`1623`) + +Here is a sample + +.. ipython:: python + + df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'], + index=pd.date_range('1/1/2000', periods=10)) + df.iloc[3:7] = np.nan + df + +One can operate using string function names, callables, lists, or dictionaries of these. + +Using a single function is equivalent to ``.apply``. + +.. ipython:: python + + df.agg('sum') + +Multiple functions in lists. + +.. ipython:: python + + df.agg(['sum', 'min']) +Dictionaries to provide the ability to selective calculation. + +.. ipython:: python + + df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']}) + +When operating on a Series, passing a dictionry allows one to rename multiple +function aggregates; this will return a MultiIndexed Series. The outer level +are the keys, the inner are the names of the functions. + +.. ipython:: python + + df.A.agg({'foo':['sum', 'min'], 'bar' : ['count','max']}) + +The API also supports a ``.transform()`` function to provide for broadcasting results. + +.. ipython:: python + + df.transform(['abs', lambda x: x-x.min()]) + +When presented with mixed dtypes that cannot aggregate, ``.agg`` will only take the valid +aggregations. This is similiar to how groupby ``.agg`` works. (:issue:`15015`) + +.. ipython:: python + + df = pd.DataFrame({'A': [1, 2, 3], + 'B': [1., 2., 3.], + 'C': ['foo', 'bar', 'baz'], + 'D': pd.date_range('20130101', periods=3)}) + df.dtypes + +.. ipython:: python + df.agg(['min', 'sum']) .. 
_whatsnew_0200.enhancements.dataio_dtype: diff --git a/pandas/core/base.py b/pandas/core/base.py index 49e43a60403ca..5ffdb4bc66234 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -289,7 +289,9 @@ class SelectionMixin(object): } @property - def name(self): + def _selection_name(self): + """ return a name for myself; this would ideally be the 'name' property, but + we cannot conflict with the Series.name property which can be set """ if self._selection is None: return None # 'result' else: @@ -404,6 +406,26 @@ def aggregate(self, func, *args, **kwargs): agg = aggregate + def _try_aggregate_string_function(self, arg, *args, **kwargs): + """ + if arg is a string, then try to operate on it: + - try to find a function on ourselves + - try to find a numpy function + - raise + + """ + assert isinstance(arg, compat.string_types) + + f = getattr(self, arg, None) + if f is not None: + return f(*args, **kwargs) + + f = getattr(np, arg, None) + if f is not None: + return f(self, *args, **kwargs) + + raise ValueError("{} is an unknown string function".format(arg)) + def _aggregate(self, arg, *args, **kwargs): """ provide an implementation for the aggregators @@ -427,14 +449,19 @@ def _aggregate(self, arg, *args, **kwargs): is_aggregator = lambda x: isinstance(x, (list, tuple, dict)) is_nested_renamer = False + _axis = kwargs.pop('_axis', None) + if _axis is None: + _axis = getattr(self, 'axis', 0) _level = kwargs.pop('_level', None) + if isinstance(arg, compat.string_types): - return getattr(self, arg)(*args, **kwargs), None + return self._try_aggregate_string_function(arg, *args, + **kwargs), None if isinstance(arg, dict): # aggregate based on the passed dict - if self.axis != 0: # pragma: no cover + if _axis != 0: # pragma: no cover raise ValueError('Can only pass dict with axis=0') obj = self._selected_obj @@ -554,32 +581,74 @@ def _agg(arg, func): result = _agg(arg, _agg_2dim) # combine results + + def is_any_series(): + # return a boolean if we have *any* 
nested series + return any([isinstance(r, ABCSeries) + for r in compat.itervalues(result)]) + + def is_any_frame(): + # return a boolean if we have *any* nested series + return any([isinstance(r, ABCDataFrame) + for r in compat.itervalues(result)]) + if isinstance(result, list): - result = concat(result, keys=keys, axis=1) - elif isinstance(list(compat.itervalues(result))[0], - ABCDataFrame): - result = concat([result[k] for k in keys], keys=keys, axis=1) - else: - from pandas import DataFrame + return concat(result, keys=keys, axis=1), True + + elif is_any_frame(): + # we have a dict of DataFrames + # return a MI DataFrame + + return concat([result[k] for k in keys], + keys=keys, axis=1), True + + elif isinstance(self, ABCSeries) and is_any_series(): + + # we have a dict of Series + # return a MI Series + try: + result = concat(result) + except TypeError: + # we want to give a nice error here if + # we have non-same sized objects, so + # we don't automatically broadcast + + raise ValueError("cannot perform both aggregation " + "and transformation operations " + "simultaneously") + + return result, True + + # fall thru + from pandas import DataFrame, Series + try: result = DataFrame(result) + except ValueError: + + # we have a dict of scalars + result = Series(result, + name=getattr(self, 'name', None)) return result, True - elif hasattr(arg, '__iter__'): - return self._aggregate_multiple_funcs(arg, _level=_level), None + elif is_list_like(arg) and arg not in compat.string_types: + # we require a list, but not an 'str' + return self._aggregate_multiple_funcs(arg, + _level=_level, + _axis=_axis), None else: result = None - cy_func = self._is_cython_func(arg) - if cy_func and not args and not kwargs: - return getattr(self, cy_func)(), None + f = self._is_cython_func(arg) + if f and not args and not kwargs: + return getattr(self, f)(), None # caller can react return result, True - def _aggregate_multiple_funcs(self, arg, _level): + def _aggregate_multiple_funcs(self, 
arg, _level, _axis): from pandas.tools.merge import concat - if self.axis != 0: + if _axis != 0: raise NotImplementedError("axis other than 0 is not supported") if self._selected_obj.ndim == 1: @@ -614,10 +683,30 @@ def _aggregate_multiple_funcs(self, arg, _level): keys.append(col) except (TypeError, DataError): pass + except ValueError: + # cannot aggregate + continue except SpecificationError: raise - return concat(results, keys=keys, axis=1) + # if we are empty + if not len(results): + raise ValueError("no results") + + try: + return concat(results, keys=keys, axis=1) + except TypeError: + + # we are concatting non-NDFrame objects, + # e.g. a list of scalars + + from pandas.types.cast import _is_nested_object + from pandas import Series + result = Series(results, index=keys, name=self.name) + if _is_nested_object(result): + raise ValueError("cannot combine transform and " + "aggregation operations") + return result def _shallow_copy(self, obj=None, obj_type=None, **kwargs): """ return a new object with the replacement attributes """ @@ -910,12 +999,15 @@ def hasnans(self): return isnull(self).any() def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + weights=None, filter_type=None, **kwds): """ perform the reduction type operation if we can """ func = getattr(self, name, None) if func is None: raise TypeError("{klass} cannot perform the operation {op}".format( klass=self.__class__.__name__, op=name)) + if weights is not None: + kwds['weights'] = weights + return func(**kwds) def value_counts(self, normalize=False, sort=True, ascending=False, diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 0562736038483..ec3d4510cda5b 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -1747,7 +1747,7 @@ def _reverse_indexer(self): # reduction ops # def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + weights=None, filter_type=None, 
**kwds): """ perform the reduction type operation """ func = getattr(self, name, None) if func is None: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d96fb094f5d5c..b791798c0b087 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4029,6 +4029,42 @@ def diff(self, periods=1, axis=0): # ---------------------------------------------------------------------- # Function application + def _gotitem(self, key, ndim, subset=None): + """ + sub-classes to define + return a sliced object + + Parameters + ---------- + key : string / list of selections + ndim : 1,2 + requested ndim of result + subset : object, default None + subset to act on + """ + if subset is None: + subset = self + + # TODO: _shallow_copy(subset)? + return self[key] + + @Appender(_shared_docs['aggregate'] % _shared_doc_kwargs) + def aggregate(self, func, axis=0, *args, **kwargs): + axis = self._get_axis_number(axis) + + # TODO: flipped axis + result = None + if axis == 0: + try: + result, how = self._aggregate(func, axis=0, *args, **kwargs) + except TypeError: + pass + if result is None: + return self.apply(func, axis=axis, args=args, **kwargs) + return result + + agg = aggregate + def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds): """ @@ -4084,22 +4120,35 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, See also -------- DataFrame.applymap: For elementwise operations + DataFrame.agg: only perform aggregating type operations + DataFrame.transform: only perform transformating type operations Returns ------- applied : Series or DataFrame """ axis = self._get_axis_number(axis) - if kwds or args and not isinstance(func, np.ufunc): + ignore_failures = kwds.pop('ignore_failures', False) + + # dispatch to agg + if axis == 0 and isinstance(func, (list, dict)): + return self.aggregate(func, axis=axis, *args, **kwds) + + if len(self.columns) == 0 and len(self.index) == 0: + return self._apply_empty_result(func, axis, reduce, 
*args, **kwds) + # if we are a string, try to dispatch + if isinstance(func, compat.string_types): + if axis: + kwds['axis'] = axis + return getattr(self, func)(*args, **kwds) + + if kwds or args and not isinstance(func, np.ufunc): def f(x): return func(x, *args, **kwds) else: f = func - if len(self.columns) == 0 and len(self.index) == 0: - return self._apply_empty_result(func, axis, reduce, *args, **kwds) - if isinstance(f, np.ufunc): with np.errstate(all='ignore'): results = f(self.values) @@ -4116,7 +4165,10 @@ def f(x): else: if reduce is None: reduce = True - return self._apply_standard(f, axis, reduce=reduce) + return self._apply_standard( + f, axis, + reduce=reduce, + ignore_failures=ignore_failures) else: return self._apply_broadcast(f, axis) @@ -4894,11 +4946,14 @@ def _count_level(self, level, axis=0, numeric_only=False): else: return result - def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + def _reduce(self, op, name, axis=0, skipna=True, weights=None, + numeric_only=None, filter_type=None, **kwds): axis = self._get_axis_number(axis) def f(x): + if weights is not None: + kwds['weights'] = weights + return op(x, axis=axis, skipna=skipna, **kwds) labels = self._get_agg_axis(axis) @@ -4920,7 +4975,13 @@ def f(x): # this can end up with a non-reduction # but not always. if the types are mixed # with datelike then need to make sure a series - result = self.apply(f, reduce=False) + + # we only end up here if we have not specified + # numeric_only and yet we have tried a + # column-by-column reduction, where we have mixed type. 
+ # So let's just do what we can + result = self.apply(f, reduce=False, + ignore_failures=True) if result.ndim == self.ndim: result = result.iloc[0] return result diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8ce4c4b00454b..1ed8e18ccc740 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -33,7 +33,7 @@ SettingWithCopyError, SettingWithCopyWarning, AbstractMethodError) -from pandas.core.base import PandasObject +from pandas.core.base import PandasObject, SelectionMixin from pandas.core.index import (Index, MultiIndex, _ensure_index, InvalidIndexError) import pandas.core.indexing as indexing @@ -91,7 +91,7 @@ def _single_replace(self, to_replace, method, inplace, limit): return result -class NDFrame(PandasObject): +class NDFrame(PandasObject, SelectionMixin): """ N-dimensional analogue of DataFrame. Store multi-dimensional in a size-mutable, labeled data structure @@ -428,6 +428,16 @@ def size(self): """number of elements in the NDFrame""" return np.prod(self.shape) + @property + def _selected_obj(self): + """ internal compat with SelectionMixin """ + return self + + @property + def _obj_with_exclusions(self): + """ internal compat with SelectionMixin """ + return self + def _expand_axes(self, key): new_axes = [] for k, ax in zip(key, self.axes): @@ -2542,7 +2552,7 @@ def tail(self, n=5): return self.iloc[-n:] def sample(self, n=None, frac=None, replace=False, weights=None, - random_state=None, axis=None): + random_state=None, axis=None, **kwargs): """ Returns a random sample of items from an axis of object. @@ -2557,7 +2567,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None, Fraction of axis items to return. Cannot be used with `n`. replace : boolean, optional Sample with or without replacement. Default = False. - weights : str or ndarray-like, optional + weights : str or ndarray-like, optional [DEPRECATED] Default 'None' results in equal probability weighting. 
If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and @@ -2628,59 +2638,22 @@ def sample(self, n=None, frac=None, replace=False, weights=None, axis = self._stat_axis_number axis = self._get_axis_number(axis) - axis_length = self.shape[axis] # Process random_state argument rs = com._random_state(random_state) - # Check weights for compliance if weights is not None: + from warnings import warn + warn("the weights argument to .sample() is deprecated." + "use {typ}.weightby(weights, axis={axis}).sample(...) " + "instead".format(typ=type(self).__name__, axis=axis), + FutureWarning, stacklevel=2) + return self.weightby(weights, axis=axis).sample( + n=n, frac=frac, + replace=replace, + random_state=random_state) - # If a series, align with frame - if isinstance(weights, pd.Series): - weights = weights.reindex(self.axes[axis]) - - # Strings acceptable if a dataframe and axis = 0 - if isinstance(weights, string_types): - if isinstance(self, pd.DataFrame): - if axis == 0: - try: - weights = self[weights] - except KeyError: - raise KeyError("String passed to weights not a " - "valid column") - else: - raise ValueError("Strings can only be passed to " - "weights when sampling from rows on " - "a DataFrame") - else: - raise ValueError("Strings cannot be passed as weights " - "when sampling from a Series or Panel.") - - weights = pd.Series(weights, dtype='float64') - - if len(weights) != axis_length: - raise ValueError("Weights and axis to be sampled must be of " - "same length") - - if (weights == np.inf).any() or (weights == -np.inf).any(): - raise ValueError("weight vector may not include `inf` values") - - if (weights < 0).any(): - raise ValueError("weight vector many not include negative " - "values") - - # If has nan, set to zero. 
- weights = weights.fillna(0) - - # Renormalize if don't sum to 1 - if weights.sum() != 1: - if weights.sum() != 0: - weights = weights / weights.sum() - else: - raise ValueError("Invalid weights: weights sum to zero") - - weights = weights.values + axis_length = self.shape[axis] # If no frac or n, default to n=1. if n is None and frac is None: @@ -2698,6 +2671,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None, raise ValueError("A negative number of rows requested. Please " "provide positive value.") + weights = kwargs.pop('_weights', None) locs = rs.choice(axis_length, size=n, replace=replace, p=weights) return self.take(locs, axis=axis, is_copy=False) @@ -2764,6 +2738,66 @@ def pipe(self, func, *args, **kwargs): else: return func(self, *args, **kwargs) + _shared_docs['aggregate'] = (""" + Aggregate using input function or dict of {column -> + function} + + .. versionadded:: 0.20.0 + + Parameters + ---------- + func : callable, string, dictionary, or list of string/callables + Function to use for aggregating the data. If a function, must either + work when passed a DataFrame or when passed to DataFrame.apply. If + passed a dict, the keys must be DataFrame column names. + + Accepted Combinations are: + - string function name + - function + - list of functions + - dict of column names -> functions (or list of functions) + + Notes + ----- + Numpy functions mean/median/prod/sum/std/var are special cased so the + default behavior is applying the function along axis=0 + (e.g., np.mean(arr_2d, axis=0)) as opposed to + mimicking the default Numpy behavior (e.g., np.mean(arr_2d)). + + Returns + ------- + aggregated : %(klass)s + + See also + -------- + """) + + _shared_docs['transform'] = (""" + Call function producing a like-indexed %(klass)s + and return a %(klass)s with the transformed values` + + .. 
versionadded:: 0.20.0 + + Parameters + ---------- + func : callable, string, dictionary, or list of string/callables + To apply to column + + Accepted Combinations are: + - string function name + - function + - list of functions + - dict of column names -> functions (or list of functions) + + Examples + -------- + >>> df.transform(lambda x: (x - x.mean()) / x.std()) + + Returns + ------- + transformed : %(klass)s + """) + # ---------------------------------------------------------------------- # Attribute access @@ -5405,14 +5439,20 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, np.putmask(rs.values, mask, np.nan) return rs - def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs): + def _agg_by_level(self, name, axis=0, level=0, skipna=True, + weights=None, **kwargs): grouped = self.groupby(level=level, axis=axis) if hasattr(grouped, name) and skipna: return getattr(grouped, name)(**kwargs) axis = self._get_axis_number(axis) method = getattr(type(self), name) - applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs) - return grouped.aggregate(applyf) + + def f(x): + if weights is not None: + kwargs['weights'] = weights + return method(x, axis=axis, skipna=skipna, **kwargs) + + return grouped.aggregate(f) @classmethod def _add_numeric_operations(cls): @@ -5596,6 +5636,61 @@ def ewm(self, com=None, span=None, halflife=None, alpha=None, cls.ewm = ewm + @Appender(_shared_docs['transform'] % _shared_doc_kwargs) + def transform(self, func, *args, **kwargs): + result = self.agg(func, *args, **kwargs) + if is_scalar(result) or len(result) != len(self): + raise ValueError("transforms cannot produce " + "aggregated results") + + return result + + cls.transform = transform + + def weightby(self, weights, axis=0): + """ + Provides weighted statistical calculations + + .. versionadded:: 0.20.0 + + Parameters + ---------- + weights : str or ndarray-like + If passed a Series, will align with the target object + on the index. 
+ + Index values in weights that are not found in the target + object will be ignored and index values in the target + object not in the weights will be assigned weights of zero. + + If called on a DataFrame, will accept the name of a column + when axis = 0. + Unless weights are a Series, weights must be same length + as the axis of the target object. + + If weights do not sum to 1, they will be normalized to + sum to 1. Missing values in the weights column will be + treated as zero. + + inf and -inf values not allowed. + + axis : int or string, default 0 + + Returns + ------- + a Weightby lazy object for the particular operation + + Examples + -------- + """ + from pandas.core import weightby + + axis = self._get_axis_number(axis) + return weightby.weightby(self, weights=weights, + axis=axis) + + cls.weightby = weightby + def _doc_parms(cls): """Return a tuple of the doc parms.""" @@ -5700,6 +5795,7 @@ def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f): @Appender(_num_doc) def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): + weights = kwargs.pop('_weights', None) nv.validate_stat_func(tuple(), kwargs, fname=name) if skipna is None: skipna = True @@ -5707,9 +5803,9 @@ def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, axis = self._stat_axis_number if level is not None: return self._agg_by_level(name, axis=axis, level=level, - skipna=skipna) + skipna=skipna, weights=weights) return self._reduce(f, name, axis=axis, skipna=skipna, - numeric_only=numeric_only) + weights=weights, numeric_only=numeric_only) return set_function_name(stat_func, name, cls) @@ -5720,6 +5816,7 @@ def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f): @Appender(_num_ddof_doc) def stat_func(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs): + weights = kwargs.pop('_weights', None) nv.validate_stat_ddof_func(tuple(), kwargs, fname=name) if skipna is None: skipna = True 
@@ -5727,9 +5824,10 @@ def stat_func(self, axis=None, skipna=None, level=None, ddof=1, axis = self._stat_axis_number if level is not None: return self._agg_by_level(name, axis=axis, level=level, - skipna=skipna, ddof=ddof) + skipna=skipna, weights=weights, + ddof=ddof) return self._reduce(f, name, axis=axis, numeric_only=numeric_only, - skipna=skipna, ddof=ddof) + weights=weights, skipna=skipna, ddof=ddof) return set_function_name(stat_func, name, cls) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 7eba32b4932d0..158693ffead78 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -703,7 +703,7 @@ def _python_apply_general(self, f): not_indexed_same=mutated or self.mutated) def _iterate_slices(self): - yield self.name, self._selected_obj + yield self._selection_name, self._selected_obj def transform(self, func, *args, **kwargs): raise AbstractMethodError(self) @@ -896,9 +896,9 @@ def reset_identity(values): result = concat(values, axis=self.axis) if (isinstance(result, Series) and - getattr(self, 'name', None) is not None): + getattr(self, '_selection_name', None) is not None): - result.name = self.name + result.name = self._selection_name return result @@ -2597,7 +2597,7 @@ class SeriesGroupBy(GroupBy): exec(_def_str) @property - def name(self): + def _selection_name(self): """ since we are a series, we by definition only have a single name, but may be the result of a selection or @@ -2740,12 +2740,12 @@ def _aggregate_multiple_funcs(self, arg, _level): def _wrap_output(self, output, index, names=None): """ common agg/transform wrapping logic """ - output = output[self.name] + output = output[self._selection_name] if names is not None: return DataFrame(output, index=index, columns=names) else: - name = self.name + name = self._selection_name if name is None: name = self._selected_obj.name return Series(output, index=index, name=name) @@ -2763,7 +2763,7 @@ def _wrap_transformed_output(self, output, names=None): def 
_wrap_applied_output(self, keys, values, not_indexed_same=False): if len(keys) == 0: # GH #6265 - return Series([], name=self.name, index=keys) + return Series([], name=self._selection_name, index=keys) def _get_index(): if self.grouper.nkeys > 1: @@ -2776,7 +2776,7 @@ def _get_index(): # GH #823 index = _get_index() result = DataFrame(values, index=index).stack() - result.name = self.name + result.name = self._selection_name return result if isinstance(values[0], (Series, dict)): @@ -2788,7 +2788,8 @@ def _get_index(): not_indexed_same=not_indexed_same) else: # GH #6265 - return Series(values, index=_get_index(), name=self.name) + return Series(values, index=_get_index(), + name=self._selection_name) def _aggregate_named(self, func, *args, **kwargs): result = {} @@ -2964,7 +2965,7 @@ def nunique(self, dropna=True): return Series(res, index=ri, - name=self.name) + name=self._selection_name) @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'}) @@ -3028,7 +3029,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, # multi-index components labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]] levels = [ping.group_index for ping in self.grouper.groupings] + [lev] - names = self.grouper.names + [self.name] + names = self.grouper.names + [self._selection_name] if dropna: mask = labels[-1] != -1 @@ -3063,7 +3064,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, if is_integer_dtype(out): out = _ensure_int64(out) - return Series(out, index=mi, name=self.name) + return Series(out, index=mi, name=self._selection_name) # for compat. 
with algos.value_counts need to ensure every # bin is present at every index level, null filled with zeros @@ -3094,7 +3095,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, if is_integer_dtype(out): out = _ensure_int64(out) - return Series(out, index=mi, name=self.name) + return Series(out, index=mi, name=self._selection_name) def count(self): """ Compute count of group, excluding missing values """ @@ -3107,7 +3108,7 @@ def count(self): return Series(out, index=self.grouper.result_index, - name=self.name, + name=self._selection_name, dtype='int64') def _apply_to_column_groupbys(self, func): @@ -3217,7 +3218,7 @@ def aggregate(self, arg, *args, **kwargs): try: assert not args and not kwargs result = self._aggregate_multiple_funcs( - [arg], _level=_level) + [arg], _level=_level, _axis=self.axis) result.columns = Index( result.columns.levels[0], name=self._selected_obj.columns.name) @@ -3448,7 +3449,8 @@ def first_non_None_value(values): except (ValueError, AttributeError): # GH1738: values is list of arrays of unequal lengths fall # through to the outer else caluse - return Series(values, index=key_index, name=self.name) + return Series(values, index=key_index, + name=self._selection_name) # if we have date/time like in the original, then coerce dates # as we are stacking can easily have object dtypes here @@ -3471,8 +3473,9 @@ def first_non_None_value(values): # only coerce dates if we find at least 1 datetime coerce = True if any([isinstance(x, Timestamp) for x in values]) else False - # self.name not passed through to Series as the result - # should not take the name of original selection of columns + # self._selection_name not passed through to Series as the + # result should not take the name of original selection + # of columns return (Series(values, index=key_index) ._convert(datetime=True, coerce=coerce)) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 1f76bc850cee9..9b04a6b6710ac 100644 --- a/pandas/core/nanops.py 
+++ b/pandas/core/nanops.py @@ -71,11 +71,14 @@ def __call__(self, alt): bn_func = None @functools.wraps(alt) - def f(values, axis=None, skipna=True, **kwds): + def f(values, axis=None, skipna=True, weights=None, **kwds): if len(self.kwargs) > 0: for k, v in compat.iteritems(self.kwargs): if k not in kwds: kwds[k] = v + + if weights is not None: + kwds['weights'] = weights try: if self.zero_value is not None and values.size == 0: if values.ndim == 1: @@ -91,7 +94,7 @@ def f(values, axis=None, skipna=True, **kwds): result.fill(0) return result - if (_USE_BOTTLENECK and skipna and + if (_USE_BOTTLENECK and skipna and weights is None and _bn_ok_dtype(values.dtype, bn_name)): result = bn_func(values, axis=axis, **kwds) @@ -101,7 +104,8 @@ def f(values, axis=None, skipna=True, **kwds): result = alt(values, axis=axis, skipna=skipna, **kwds) else: result = alt(values, axis=axis, skipna=skipna, **kwds) - except Exception: + except Exception as e: + try: result = alt(values, axis=axis, skipna=skipna, **kwds) except ValueError as e: @@ -169,11 +173,29 @@ def _get_fill_value(dtype, fill_value=None, fill_value_typ=None): return tslib.iNaT -def _get_values(values, skipna, fill_value=None, fill_value_typ=None, - isfinite=False, copy=True): - """ utility to get the values view, mask, dtype +def _get_values(values, skipna, + fill_value=None, fill_value_typ=None, + isfinite=False, weights=None, axis=None, + copy=True): + """ + utility to get the values view, mask, dtype if necessary copy and mask using the specified fill_value - copy = True will force the copy + and adjust for weights + + Parameters + ---------- + values : ndarray + skipna : boolean + fill_value : value, default None + value to fillna + fill_value_typ : value, default None + dtype of the fillvalue + isfinite : boolean, default False + weights : ndarray, optional + normalized ndarray, same length as the axis + axis : axis to broadcast, default None + copy : boolean, default True + True will force the copy """ values 
= _values_from_object(values) if isfinite: @@ -181,6 +203,10 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, else: mask = isnull(values) + # weights + if weights is not None: + values = values * weights.reshape(values.shape) + dtype = values.dtype dtype_ok = _na_ok_dtype(dtype) @@ -267,13 +293,16 @@ def nanall(values, axis=None, skipna=True): @disallow('M8') @bottleneck_switch(zero_value=0) -def nansum(values, axis=None, skipna=True): - values, mask, dtype, dtype_max = _get_values(values, skipna, 0) +def nansum(values, axis=None, skipna=True, weights=None): + values, mask, dtype, dtype_max = _get_values(values, skipna, + 0, weights=weights, + axis=axis) dtype_sum = dtype_max if is_float_dtype(dtype): dtype_sum = dtype elif is_timedelta64_dtype(dtype): dtype_sum = np.float64 + the_sum = values.sum(axis, dtype=dtype_sum) the_sum = _maybe_null_out(the_sum, axis, mask) @@ -282,8 +311,10 @@ def nansum(values, axis=None, skipna=True): @disallow('M8') @bottleneck_switch() -def nanmean(values, axis=None, skipna=True): - values, mask, dtype, dtype_max = _get_values(values, skipna, 0) +def nanmean(values, axis=None, skipna=True, weights=None): + values, mask, dtype, dtype_max = _get_values(values, skipna, + 0, weights=weights, + axis=axis) dtype_sum = dtype_max dtype_count = np.float64 @@ -368,14 +399,14 @@ def _get_counts_nanvar(mask, axis, ddof, dtype=float): @disallow('M8') @bottleneck_switch(ddof=1) -def nanstd(values, axis=None, skipna=True, ddof=1): +def nanstd(values, axis=None, skipna=True, ddof=1, weights=None): result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof)) return _wrap_results(result, values.dtype) @disallow('M8') @bottleneck_switch(ddof=1) -def nanvar(values, axis=None, skipna=True, ddof=1): +def nanvar(values, axis=None, skipna=True, ddof=1, weights=None): dtype = values.dtype mask = isnull(values) @@ -414,7 +445,7 @@ def nanvar(values, axis=None, skipna=True, ddof=1): @disallow('M8', 'm8') -def nansem(values, 
axis=None, skipna=True, ddof=1): +def nansem(values, axis=None, skipna=True, ddof=1, weights=None): var = nanvar(values, axis, skipna, ddof=ddof) mask = isnull(values) @@ -476,7 +507,7 @@ def nanargmin(values, axis=None, skipna=True): @disallow('M8', 'm8') -def nanskew(values, axis=None, skipna=True): +def nanskew(values, axis=None, skipna=True, weights=None): """ Compute the sample skewness. The statistic computed here is the adjusted Fisher-Pearson standardized @@ -531,7 +562,7 @@ def nanskew(values, axis=None, skipna=True): @disallow('M8', 'm8') -def nankurt(values, axis=None, skipna=True): +def nankurt(values, axis=None, skipna=True, weights=None): """ Compute the sample skewness. The statistic computed here is the adjusted Fisher-Pearson standardized diff --git a/pandas/core/panel.py b/pandas/core/panel.py index f708774dd84ff..8685b51083859 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1101,10 +1101,13 @@ def _apply_2d(self, func, axis): return self._construct_return_type(dict(results)) def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + weights=None, filter_type=None, **kwds): if numeric_only: raise NotImplementedError('Panel.{0} does not implement ' 'numeric_only.'.format(name)) + if weights is not None: + raise NotImplementedError('Panel.{0} does not implement ' + 'weights.'.format(name)) axis_name = self._get_axis_name(axis) axis_number = self._get_axis_number(axis_name) diff --git a/pandas/core/series.py b/pandas/core/series.py index f656d72296e3a..17b7382d6e761 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2178,6 +2178,49 @@ def map_f(values, f): return self._constructor(new_values, index=self.index).__finalize__(self) + def _gotitem(self, key, ndim, subset=None): + """ + sub-classes to define + return a sliced object + + Parameters + ---------- + key : string / list of selections + ndim : 1,2 + requested ndim of result + subset : object, default None + subset to act 
on + """ + return self + + @Appender(generic._shared_docs['aggregate'] % _shared_doc_kwargs) + def aggregate(self, func, axis=0, *args, **kwargs): + axis = self._get_axis_number(axis) + result, how = self._aggregate(func, *args, **kwargs) + if result is None: + + # we can be called from an inner function which + # passes this meta-data + kwargs.pop('_axis', None) + kwargs.pop('_level', None) + + # try a regular apply, this evaluates lambdas + # row-by-row; however if the lambda is expected a Series + # expression, e.g.: lambda x: x-x.quantile(0.25) + # this will fail, so we can try a vectorized evaluation + + # we cannot FIRST try the vectorized evaluation, becuase + # then .agg and .apply would have different semantics if the + # operation is actually defined on the Series, e.g. str + try: + result = self.apply(func, *args, **kwargs) + except (ValueError, AttributeError, TypeError): + result = func(self, *args, **kwargs) + + return result + + agg = aggregate + def apply(self, func, convert_dtype=True, args=(), **kwds): """ Invoke function on values of Series. 
Can be ufunc (a NumPy function @@ -2201,6 +2244,8 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): See also -------- Series.map: For element-wise operations + Series.agg: only perform aggregating type operations + Series.transform: only perform transformating type operations Examples -------- @@ -2277,6 +2322,15 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): return self._constructor(dtype=self.dtype, index=self.index).__finalize__(self) + # dispatch to agg + if isinstance(func, (list, dict)): + return self.aggregate(func, *args, **kwds) + + # if we are a string, try to dispatch + if isinstance(func, compat.string_types): + return self._try_aggregate_string_function(func, *args, **kwds) + + # handle ufuncs and lambdas if kwds or args and not isinstance(func, np.ufunc): f = lambda x: func(x, *args, **kwds) else: @@ -2286,6 +2340,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): if isinstance(f, np.ufunc): return f(self) + # row-wise access if is_extension_type(self.dtype): mapped = self._values.map(f) else: @@ -2299,8 +2354,8 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): return self._constructor(mapped, index=self.index).__finalize__(self) - def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + def _reduce(self, op, name, axis=0, skipna=True, weights=None, + numeric_only=None, filter_type=None, **kwds): """ perform a reduction operation @@ -2315,11 +2370,15 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, if numeric_only: raise NotImplementedError('Series.{0} does not implement ' 'numeric_only.'.format(name)) + + if weights is not None: + kwds['weights'] = weights + with np.errstate(all='ignore'): return op(delegate, skipna=skipna, **kwds) return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna, - numeric_only=numeric_only, + weights=weights, numeric_only=numeric_only, filter_type=filter_type, **kwds) def _reindex_indexer(self, 
new_index, indexer, copy): diff --git a/pandas/core/weightby.py b/pandas/core/weightby.py new file mode 100644 index 0000000000000..92a4e24e09afc --- /dev/null +++ b/pandas/core/weightby.py @@ -0,0 +1,239 @@ +""" + +provide a lazy structure to support +weights for calculation +similar to how we have a Groupby object + + +""" + +import numpy as np +import pandas as pd + +from pandas import Series, DataFrame +from pandas.compat import string_types, set_function_name +from pandas.types.generic import ABCSeries, ABCDataFrame +from pandas.types.common import is_scalar, is_list_like +from pandas.core.base import PandasObject, SelectionMixin +from pandas.util.decorators import Appender, Substitution + + +class Weightby(PandasObject, SelectionMixin): + _attributes = ['weights', 'axis'] + + def __init__(self, obj, weights=None, axis=0): + + self.exclusions = set() + self._weights = None + self.weights = weights + self.axis = axis + self.obj = obj + + def _gotitem(self, key, ndim, subset=None): + """ + sub-classes to define + return a sliced object + + Parameters + ---------- + key : string / list of selections + ndim : 1,2 + requested ndim of result + subset : object, default None + subset to act on + """ + + # create a new object to prevent aliasing + if subset is None: + subset = self.obj + + newself = self._shallow_copy(subset, obj_type=type(self)) + newself._reset_cache() + if subset.ndim == 2: + if is_scalar(key) and key in subset or is_list_like(key): + newself._selection = key + return newself + + def __getattr__(self, attr): + if attr in self._internal_names_set: + return object.__getattribute__(self, attr) + if attr in self.obj: + return self[attr] + + raise AttributeError("%r object has no attribute %r" % + (type(self).__name__, attr)) + + def _compute_weights(self): + """ + compute our _weights + """ + if self._weights is not None: + return self._weights + + obj = self._selected_obj + + weights = self.weights + axis = self.axis + + # If a series, align with frame 
+ if isinstance(weights, Series): + weights = weights.reindex(obj.axes[axis]) + + # Strings acceptable if a dataframe and axis = 0 + if isinstance(weights, string_types): + + # we use self.obj as we may have a selection here + if isinstance(self.obj, pd.DataFrame): + if axis == 0: + try: + + # exclude this as an aggregator + self.exclusions.add(weights) + + weights = self.obj[weights] + + except KeyError: + raise KeyError("String passed to weights is not a " + "valid column") + else: + raise ValueError("Strings can only be passed to " + "weights when weighting by the rows on " + "a DataFrame") + else: + raise ValueError("Strings cannot be passed as weights " + "when weighting from a Series or Panel.") + + weights = Series(weights, dtype='float64') + + if len(weights) != len(obj.axes[axis]): + raise ValueError("Weights and axis to be must be of " + "same length") + + if (weights == np.inf).any() or (weights == -np.inf).any(): + raise ValueError("weight vector may not include `inf` values") + + if (weights < 0).any(): + raise ValueError("weight vector many not include negative " + "values") + + # If has nan, set to zero. 
+ weights = weights.fillna(0) + + # Renormalize if don't sum to 1 + if weights.sum() != 1: + if weights.sum() != 0: + weights = weights / weights.sum() + else: + raise ValueError("Invalid weights: weights sum to zero") + + self._weights = weights.values + return self._weights + + def _apply(self, func, *args, **kwargs): + """ + Apply the function with weights + + Parameters + ---------- + func : string/callable to apply + + Returns + ------- + y : type of input + """ + + weights = self._compute_weights() + + # we may need to drop the dim + # before operations + obj = self._obj_with_exclusions + if self._selection is not None: + obj = obj[self._selection] + + f = getattr(obj, func) + + kwargs['axis'] = self.axis + kwargs['_weights'] = weights + + result = f(*args, **kwargs) + result = self._wrap_results(result) + return result + + def _wrap_results(self, result): + return result + + +class SeriesWeightBy(Weightby): + + @property + def _constructor(self): + return Series + + @Substitution(name='weightby') + @Appender(SelectionMixin._see_also_template) + @Appender(SelectionMixin._agg_doc) + def aggregate(self, arg, *args, **kwargs): + return super(SeriesWeightBy, self).aggregate(arg, *args, **kwargs) + + agg = aggregate + + @Appender(Series.sample.__doc__) + def sample(self, n=None, frac=None, replace=False, + random_state=None): + return self._apply('sample', n=n, frac=frac, replace=replace, + random_state=random_state) + + +class DataFrameWeightBy(Weightby): + + @property + def _constructor(self): + return DataFrame + + @Substitution(name='weightby') + @Appender(SelectionMixin._see_also_template) + @Appender(SelectionMixin._agg_doc) + def aggregate(self, arg, *args, **kwargs): + return super(DataFrameWeightBy, self).aggregate(arg, *args, **kwargs) + + agg = aggregate + + @Appender(DataFrame.sample.__doc__) + def sample(self, n=None, frac=None, replace=False, + random_state=None): + return self._apply('sample', n=n, frac=frac, replace=replace, + 
random_state=random_state) + + +def _add_stat_function(cls, ref_obj, name): + + @Appender(getattr(ref_obj, name).__doc__) + def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, + **kwargs): + return self._apply(name, axis=axis, skipna=skipna, level=level, + numeric_only=numeric_only, **kwargs) + + setattr(cls, name, set_function_name(stat_func, name, cls)) + + +# add in stat methods +for method in ['sum', 'mean', 'std', 'var', + 'sem', 'kurt', 'skew', 'sem']: + + _add_stat_function(SeriesWeightBy, Series, method) + _add_stat_function(DataFrameWeightBy, DataFrame, method) + + +# Top-level exports +def weightby(obj, *args, **kwds): + if isinstance(obj, ABCSeries): + klass = SeriesWeightBy + elif isinstance(obj, ABCDataFrame): + klass = DataFrameWeightBy + else: + raise TypeError('invalid type: %s' % type(obj)) + + return klass(obj, *args, **kwds) + + +weightby.__doc__ = Weightby.__doc__ diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index d6bc892921c42..d2842ae80088a 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -325,7 +325,7 @@ def __array_finalize__(self, obj): self.fill_value = getattr(obj, 'fill_value', None) def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + weights=None, filter_type=None, **kwds): """ perform a reduction operation """ return op(self.get_values(), skipna=skipna, **kwds) diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index 9e68b7e76d78f..55b81f986983c 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -108,6 +108,17 @@ def test_apply_standard_nonunique(self): rs = df.T.apply(lambda s: s[0], axis=0) assert_series_equal(rs, xp) + def test_with_string_args(self): + + for arg in ['sum', 'mean', 'min', 'max', 'std']: + result = self.frame.apply(arg) + expected = getattr(self.frame, arg)() + tm.assert_series_equal(result, expected) + + result = self.frame.apply(arg, axis=1) 
+ expected = getattr(self.frame, arg)(axis=1) + tm.assert_series_equal(result, expected) + def test_apply_broadcast(self): broadcasted = self.frame.apply(np.mean, broadcast=True) agged = self.frame.apply(np.mean) @@ -448,3 +459,161 @@ def test_apply_non_numpy_dtype(self): df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category') result = df.apply(lambda x: x) assert_frame_equal(result, df) + + +def zip_frames(*frames): + """ + take a list of frames, zip the columns together for each + assume that these all have the first frame columns + + return a new frame + """ + columns = frames[0].columns + zipped = [f[c] for c in columns for f in frames] + return pd.concat(zipped, axis=1) + + +class TestDataFrameAggregate(tm.TestCase, TestData): + + _multiprocess_can_split_ = True + + def test_agg_transform(self): + + with np.errstate(all='ignore'): + + f_sqrt = np.sqrt(self.frame) + f_abs = np.abs(self.frame) + + # ufunc + result = self.frame.transform(np.sqrt) + expected = f_sqrt.copy() + assert_frame_equal(result, expected) + + result = self.frame.apply(np.sqrt) + assert_frame_equal(result, expected) + + result = self.frame.transform(np.sqrt) + assert_frame_equal(result, expected) + + # list-like + result = self.frame.apply([np.sqrt]) + expected = f_sqrt.copy() + expected.columns = pd.MultiIndex.from_product( + [self.frame.columns, ['sqrt']]) + assert_frame_equal(result, expected) + + result = self.frame.transform([np.sqrt]) + assert_frame_equal(result, expected) + + # multiple items in list + # these are in the order as if we are applying both + # functions per series and then concatting + expected = zip_frames(f_sqrt, f_abs) + expected.columns = pd.MultiIndex.from_product( + [self.frame.columns, ['sqrt', 'absolute']]) + result = self.frame.apply([np.sqrt, np.abs]) + assert_frame_equal(result, expected) + + result = self.frame.transform(['sqrt', np.abs]) + assert_frame_equal(result, expected) + + def test_transform_and_agg_err(self): + # cannot both transform and agg 
+ def f(): + self.frame.transform(['max', 'min']) + self.assertRaises(ValueError, f) + + def f(): + with np.errstate(all='ignore'): + self.frame.agg(['max', 'sqrt']) + self.assertRaises(ValueError, f) + + def f(): + with np.errstate(all='ignore'): + self.frame.transform(['max', 'sqrt']) + self.assertRaises(ValueError, f) + + df = pd.DataFrame({'A': range(5), 'B': 5}) + + def f(): + with np.errstate(all='ignore'): + df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}) + + def test_demo(self): + # demonstration tests + df = pd.DataFrame({'A': range(5), 'B': 5}) + + result = df.agg(['min', 'max']) + expected = DataFrame({'A': [0, 4], 'B': [5, 5]}, + columns=['A', 'B'], + index=['min', 'max']) + tm.assert_frame_equal(result, expected) + + result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']}) + expected = DataFrame({'A': [4.0, 0.0, np.nan], + 'B': [5.0, np.nan, 25.0]}, + columns=['A', 'B'], + index=['max', 'min', 'sum']) + tm.assert_frame_equal(result.reindex_like(expected), expected) + + def test_agg_reduce(self): + # all reducers + expected = zip_frames(self.frame.mean().to_frame(), + self.frame.max().to_frame(), + self.frame.sum().to_frame()).T + expected.index = ['mean', 'max', 'sum'] + result = self.frame.agg(['mean', 'max', 'sum']) + assert_frame_equal(result, expected) + + # dict input with scalars + result = self.frame.agg({'A': 'mean', 'B': 'sum'}) + expected = Series([self.frame.A.mean(), self.frame.B.sum()], + index=['A', 'B']) + assert_series_equal(result.reindex_like(expected), expected) + + # dict input with lists + result = self.frame.agg({'A': ['mean'], 'B': ['sum']}) + expected = DataFrame({'A': Series([self.frame.A.mean()], + index=['mean']), + 'B': Series([self.frame.B.sum()], + index=['sum'])}) + assert_frame_equal(result.reindex_like(expected), expected) + + # dict input with lists with multiple + result = self.frame.agg({'A': ['mean', 'sum'], + 'B': ['sum', 'max']}) + expected = DataFrame({'A': Series([self.frame.A.mean(), + 
self.frame.A.sum()], + index=['mean', 'sum']), + 'B': Series([self.frame.B.sum(), + self.frame.B.max()], + index=['sum', 'max'])}) + assert_frame_equal(result.reindex_like(expected), expected) + + def test_nuiscance_columns(self): + + # GH 15015 + df = DataFrame({'A': [1, 2, 3], + 'B': [1., 2., 3.], + 'C': ['foo', 'bar', 'baz'], + 'D': pd.date_range('20130101', periods=3)}) + + result = df.agg('min') + expected = Series([1, 1., 'bar', pd.Timestamp('20130101')], + index=df.columns) + assert_series_equal(result, expected) + + result = df.agg(['min']) + expected = DataFrame([[1, 1., 'bar', pd.Timestamp('20130101')]], + index=['min'], columns=df.columns) + assert_frame_equal(result, expected) + + result = df.agg('sum') + expected = Series([6, 6., 'foobarbaz'], + index=['A', 'B', 'C']) + assert_series_equal(result, expected) + + result = df.agg(['sum']) + expected = DataFrame([[6, 6., 'foobarbaz']], + index=['sum'], columns=['A', 'B', 'C']) + assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index e87b5d04271e8..3031922b85821 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -5148,7 +5148,7 @@ def test_tab_completion(self): expected = set( ['A', 'B', 'C', 'agg', 'aggregate', 'apply', 'boxplot', 'filter', 'first', 'get_group', 'groups', 'hist', 'indices', 'last', 'max', - 'mean', 'median', 'min', 'name', 'ngroups', 'nth', 'ohlc', 'plot', + 'mean', 'median', 'min', 'ngroups', 'nth', 'ohlc', 'plot', 'prod', 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count', 'head', 'irow', 'describe', 'cummax', 'quantile', 'rank', 'cumprod', 'tail', 'resample', 'cummin', 'fillna', 'cumsum', diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index ec7ffde344d31..d82c2b8ec4b10 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -1,13 +1,14 @@ # coding=utf-8 # pylint: disable-msg=E1101,W0612 +from 
collections import OrderedDict import numpy as np import pandas as pd from pandas import (Index, Series, DataFrame, isnull) from pandas.compat import lrange from pandas import compat -from pandas.util.testing import assert_series_equal +from pandas.util.testing import assert_series_equal, assert_frame_equal import pandas.util.testing as tm from .common import TestData @@ -25,16 +26,11 @@ def test_apply(self): import math assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts)) - # how to handle Series result, #2316 - result = self.ts.apply(lambda x: Series( - [x, x ** 2], index=['x', 'x^2'])) - expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2}) - tm.assert_frame_equal(result, expected) - # empty series s = Series(dtype=object, name='foo', index=pd.Index([], name='bar')) rs = s.apply(lambda x: x) tm.assert_series_equal(s, rs) + # check all metadata (GH 9322) self.assertIsNot(s, rs) self.assertIs(s.index, rs.index) @@ -66,6 +62,13 @@ def test_apply_dont_convert_dtype(self): result = s.apply(f, convert_dtype=False) self.assertEqual(result.dtype, object) + def test_with_string_args(self): + + for arg in ['sum', 'mean', 'min', 'max', 'std']: + result = self.ts.apply(arg) + expected = getattr(self.ts, arg)() + self.assertEqual(result, expected) + def test_apply_args(self): s = Series(['foo,bar']) @@ -139,6 +142,157 @@ def f(x): tm.assert_series_equal(result, exp) +class TestSeriesAggregate(TestData, tm.TestCase): + + _multiprocess_can_split_ = True + + def test_transform(self): + # transforming functions + + with np.errstate(all='ignore'): + + f_sqrt = np.sqrt(self.series) + f_abs = np.abs(self.series) + + # ufunc + result = self.series.transform(np.sqrt) + expected = f_sqrt.copy() + assert_series_equal(result, expected) + + result = self.series.apply(np.sqrt) + assert_series_equal(result, expected) + + # list-like + result = self.series.transform([np.sqrt]) + expected = f_sqrt.to_frame().copy() + expected.columns = ['sqrt'] + assert_frame_equal(result, 
expected) + + result = self.series.transform([np.sqrt]) + assert_frame_equal(result, expected) + + result = self.series.transform(['sqrt']) + assert_frame_equal(result, expected) + + # multiple items in list + # these are in the order as if we are applying both functions per + # series and then concatting + expected = pd.concat([f_sqrt, f_abs], axis=1) + expected.columns = ['sqrt', 'absolute'] + result = self.series.apply([np.sqrt, np.abs]) + assert_frame_equal(result, expected) + + result = self.series.transform(['sqrt', 'abs']) + expected.columns = ['sqrt', 'abs'] + assert_frame_equal(result, expected) + + # dict, provide renaming + expected = pd.concat([f_sqrt, f_abs], axis=1) + expected.columns = ['foo', 'bar'] + expected = expected.unstack().rename('series') + + result = self.series.apply({'foo': np.sqrt, 'bar': np.abs}) + assert_series_equal(result.reindex_like(expected), expected) + + def test_transform_and_agg_error(self): + # we are trying to transform with an aggregator + def f(): + self.series.transform(['min', 'max']) + self.assertRaises(ValueError, f) + + def f(): + with np.errstate(all='ignore'): + self.series.agg(['sqrt', 'max']) + self.assertRaises(ValueError, f) + + def f(): + with np.errstate(all='ignore'): + self.series.transform(['sqrt', 'max']) + self.assertRaises(ValueError, f) + + def f(): + with np.errstate(all='ignore'): + self.series.agg({'foo': np.sqrt, 'bar': 'sum'}) + self.assertRaises(ValueError, f) + + def test_demo(self): + # demonstration tests + s = Series(range(6), dtype='int64', name='series') + + result = s.agg(['min', 'max']) + expected = Series([0, 5], index=['min', 'max'], name='series') + tm.assert_series_equal(result, expected) + + result = s.agg({'foo': 'min'}) + expected = Series([0], index=['foo'], name='series') + tm.assert_series_equal(result, expected) + + result = s.agg({'foo': ['min', 'max']}) + expected = DataFrame( + {'foo': [0, 5]}, + index=['min', 'max']).unstack().rename('series') + 
tm.assert_series_equal(result, expected) + + def test_multiple_aggregators_with_dict_api(self): + + s = Series(range(6), dtype='int64', name='series') + result = s.agg({'foo': ['min', 'max'], 'bar': ['sum', 'mean']}) + + expected = DataFrame( + {'foo': [5.0, np.nan, 0.0, np.nan], + 'bar': [np.nan, 2.5, np.nan, 15.0]}, + columns=['foo', 'bar'], + index=['max', 'mean', + 'min', 'sum']).unstack().rename('series') + tm.assert_series_equal(result.reindex_like(expected), expected) + + def test_agg_apply_evaluate_lambdas_the_same(self): + # test that we are evaluating row-by-row first + # before vectorized evaluation + result = self.series.apply(lambda x: str(x)) + expected = self.series.agg(lambda x: str(x)) + tm.assert_series_equal(result, expected) + + result = self.series.apply(str) + expected = self.series.agg(str) + tm.assert_series_equal(result, expected) + + def test_with_nested_series(self): + # GH 2316 + # .agg with a reducer and a transform, what to do + result = self.ts.apply(lambda x: Series( + [x, x ** 2], index=['x', 'x^2'])) + expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2}) + tm.assert_frame_equal(result, expected) + + result = self.ts.agg(lambda x: Series( + [x, x ** 2], index=['x', 'x^2'])) + tm.assert_frame_equal(result, expected) + + def test_replicate_describe(self): + # this also tests a result set that is all scalars + expected = self.series.describe() + result = self.series.apply(OrderedDict( + [('count', 'count'), + ('mean', 'mean'), + ('std', 'std'), + ('min', 'min'), + ('25%', lambda x: x.quantile(0.25)), + ('50%', 'median'), + ('75%', lambda x: x.quantile(0.75)), + ('max', 'max')])) + assert_series_equal(result, expected) + + def test_reduce(self): + # reductions with named functions + result = self.series.agg(['sum', 'mean']) + expected = Series([self.series.sum(), + self.series.mean()], + ['sum', 'mean'], + name=self.series.name) + assert_series_equal(result, expected) + + class TestSeriesMap(TestData, tm.TestCase): 
_multiprocess_can_split_ = True diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 3500ce913462a..299ce80243d3a 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -449,60 +449,6 @@ def test_sample(self): self.assertTrue(len(o.sample(frac=0.34) == 3)) self.assertTrue(len(o.sample(frac=0.36) == 4)) - ### - # Check weights - ### - - # Weight length must be right - with tm.assertRaises(ValueError): - o.sample(n=3, weights=[0, 1]) - - with tm.assertRaises(ValueError): - bad_weights = [0.5] * 11 - o.sample(n=3, weights=bad_weights) - - with tm.assertRaises(ValueError): - bad_weight_series = Series([0, 0, 0.2]) - o.sample(n=4, weights=bad_weight_series) - - # Check won't accept negative weights - with tm.assertRaises(ValueError): - bad_weights = [-0.1] * 10 - o.sample(n=3, weights=bad_weights) - - # Check inf and -inf throw errors: - with tm.assertRaises(ValueError): - weights_with_inf = [0.1] * 10 - weights_with_inf[0] = np.inf - o.sample(n=3, weights=weights_with_inf) - - with tm.assertRaises(ValueError): - weights_with_ninf = [0.1] * 10 - weights_with_ninf[0] = -np.inf - o.sample(n=3, weights=weights_with_ninf) - - # All zeros raises errors - zero_weights = [0] * 10 - with tm.assertRaises(ValueError): - o.sample(n=3, weights=zero_weights) - - # All missing weights - nan_weights = [np.nan] * 10 - with tm.assertRaises(ValueError): - o.sample(n=3, weights=nan_weights) - - # Check np.nan are replaced by zeros. - weights_with_nan = [np.nan] * 10 - weights_with_nan[5] = 0.5 - self._compare( - o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6]) - - # Check None are also replaced by zeros. 
- weights_with_None = [None] * 10 - weights_with_None[5] = 0.5 - self._compare( - o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6]) - def test_size_compat(self): # GH8846 # size property should be defined @@ -1580,66 +1526,13 @@ class TestNDFrame(tm.TestCase): # tests that don't fit elsewhere def test_sample(sel): - # Fixes issue: 2419 - # additional specific object based tests - - # A few dataframe test with degenerate weights. - easy_weight_list = [0] * 10 - easy_weight_list[5] = 1 + # all weight testing happens in test_weightby.py df = pd.DataFrame({'col1': range(10, 20), 'col2': range(20, 30), - 'colString': ['a'] * 10, - 'easyweights': easy_weight_list}) - sample1 = df.sample(n=1, weights='easyweights') - assert_frame_equal(sample1, df.iloc[5:6]) - - # Ensure proper error if string given as weight for Series, panel, or - # DataFrame with axis = 1. - s = Series(range(10)) - with tm.assertRaises(ValueError): - s.sample(n=3, weights='weight_column') - - panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4], - minor_axis=[3, 4, 5]) - with tm.assertRaises(ValueError): - panel.sample(n=1, weights='weight_column') - - with tm.assertRaises(ValueError): - df.sample(n=1, weights='weight_column', axis=1) + 'colString': ['a'] * 10}) # Check weighting key error - with tm.assertRaises(KeyError): - df.sample(n=3, weights='not_a_real_column_name') - - # Check that re-normalizes weights that don't sum to one. 
- weights_less_than_1 = [0] * 10 - weights_less_than_1[0] = 0.5 - tm.assert_frame_equal( - df.sample(n=1, weights=weights_less_than_1), df.iloc[:1]) - - ### - # Test axis argument - ### - - # Test axis argument - df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10}) - second_column_weight = [0, 1] - assert_frame_equal( - df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']]) - - # Different axis arg types - assert_frame_equal(df.sample(n=1, axis='columns', - weights=second_column_weight), - df[['col2']]) - - weight = [0] * 10 - weight[5] = 0.5 - assert_frame_equal(df.sample(n=1, axis='rows', weights=weight), - df.iloc[5:6]) - assert_frame_equal(df.sample(n=1, axis='index', weights=weight), - df.iloc[5:6]) - # Check out of range axis values with tm.assertRaises(ValueError): df.sample(n=1, axis=2) @@ -1651,20 +1544,6 @@ def test_sample(sel): s = pd.Series(range(10)) s.sample(n=1, axis=1) - # Test weight length compared to correct axis - with tm.assertRaises(ValueError): - df.sample(n=1, axis=1, weights=[0.5] * 10) - - # Check weights with axis = 1 - easy_weight_list = [0] * 3 - easy_weight_list[2] = 1 - - df = pd.DataFrame({'col1': range(10, 20), - 'col2': range(20, 30), - 'colString': ['a'] * 10}) - sample1 = df.sample(n=1, axis=1, weights=easy_weight_list) - assert_frame_equal(sample1, df[['colString']]) - # Test default axes p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6], minor_axis=[1, 3, 5]) @@ -1675,27 +1554,6 @@ def test_sample(sel): df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)) - # Test that function aligns weights with frame - df = DataFrame( - {'col1': [5, 6, 7], - 'col2': ['a', 'b', 'c'], }, index=[9, 5, 3]) - s = Series([1, 0, 0], index=[3, 5, 9]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s)) - - # Weights have index values to be dropped because not in - # sampled DataFrame - s2 = Series([0.001, 0, 10000], index=[3, 5, 10]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2)) - - 
# Weights have empty values to be filed with zeros - s3 = Series([0.01, 0], index=[3, 5]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3)) - - # No overlap in weight and sampled DataFrame indices - s4 = Series([1, 0], index=[1, 2]) - with tm.assertRaises(ValueError): - df.sample(1, weights=s4) - def test_squeeze(self): # noop for s in [tm.makeFloatSeries(), tm.makeStringSeries(), diff --git a/pandas/tests/test_weightby.py b/pandas/tests/test_weightby.py new file mode 100644 index 0000000000000..fedbabfa0a91c --- /dev/null +++ b/pandas/tests/test_weightby.py @@ -0,0 +1,233 @@ +import numpy as np +import pandas as pd + +from pandas import DataFrame, Series +from pandas.util import testing as tm +from pandas.core import common as com + + +class TestWeights(tm.TestCase): + + def setUp(self): + self.df = DataFrame({'A': [0.25, 0.25, 0.25, 0.25], + 'B': [1, 2, 3, 4]}) + self.df2 = DataFrame({'A': [1, 2, 3, 4], + 'B': [1, 2, 3, 4]}) + + def test_basic(self): + + for f in ['sum', 'mean']: + weights = self.df[['A']] / self.df.A.sum() + result = getattr(self.df.weightby('A'), f)() + expected = getattr(self.df[['B']] * weights.values, f)() + tm.assert_series_equal(result, expected) + + weights2 = self.df2[['A']] / self.df2.A.sum() + result = getattr(self.df2.weightby('A'), f)() + expected = getattr(self.df2[['B']] * weights2.values, f)() + tm.assert_series_equal(result, expected) + + for f in ['kurt', 'skew', 'sem']: + weights = self.df[['A']] / self.df.A.sum() + result = getattr(self.df.weightby('A'), f)() + expected = getattr(self.df[['B']] * weights.values, f)() + # tm.assert_series_equal(result, expected) + + weights2 = self.df2[['A']] / self.df2.A.sum() + result = getattr(self.df2.weightby('A'), f)() + expected = getattr(self.df2[['B']] * weights2.values, f)() + # tm.assert_series_equal(result, expected) + + for f in ['std', 'var']: + + weights = self.df[['A']] / self.df.A.sum() + result = getattr(self.df.weightby('A'), f)(ddof=2) + expected = 
getattr(self.df[['B']] * weights.values, f)(ddof=2) + # tm.assert_series_equal(result, expected) + + weights2 = self.df2[['A']] / self.df2.A.sum() + result = getattr(self.df2.weightby('A'), f)(ddof=2) + expected = getattr(self.df2[['B']] * weights2.values, f)(ddof=2) + # tm.assert_series_equal(result, expected) + + def test_gotitem(self): + + result = self.df.weightby('A')['B'].sum() + expected = self.df.weightby('A').sum()['B'] + self.assertEqual(result, expected) + + result = self.df.weightby('A').B.sum() + self.assertEqual(result, expected) + + result = self.df['B'].weightby(self.df['A']).sum() + self.assertEqual(result, expected) + + def test_sample_deprecation(self): + rs = com._random_state(1234) + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = self.df.sample(2, random_state=rs, weights='A') + + expected = self.df.iloc[[0, 2]][['B']] + tm.assert_frame_equal(result, expected) + + def test_unsupported(self): + for f in ['first', 'median', 'min', 'max', 'prod']: + + def func(): + getattr(self.df.weightby('A'), f)() + self.assertRaises(AttributeError, func) + + def test_panel_unsupported(self): + panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4], + minor_axis=[3, 4, 5]) + with tm.assertRaises(AttributeError): + panel.weightby('weight_column') + + def test_weights_validation(self): + o = DataFrame(np.random.randn(10, 10)) + + # Weight length must be right + with tm.assertRaises(ValueError): + o.weightby([0, 1]).sample(n=3) + + with tm.assertRaises(ValueError): + bad_weights = [0.5] * 11 + o.weightby(bad_weights).sample(n=3) + + with tm.assertRaises(ValueError): + bad_weight_series = Series([0, 0, 0.2]) + o.weightby(bad_weight_series).sample(n=4) + + # Check won't accept negative weights + with tm.assertRaises(ValueError): + bad_weights = [-0.1] * 10 + o.weightby(bad_weights).sample(n=3) + + # Check inf and -inf throw errors: + with tm.assertRaises(ValueError): + weights_with_inf = [0.1] * 10 + weights_with_inf[0] = np.inf + 
o.weightby(weights_with_inf).sample(n=3) + + with tm.assertRaises(ValueError): + weights_with_ninf = [0.1] * 10 + weights_with_ninf[0] = -np.inf + o.weightby(weights_with_ninf).sample(n=3) + + # All zeros raises errors + zero_weights = [0] * 10 + with tm.assertRaises(ValueError): + o.weightby(zero_weights).sample(n=3) + + # All missing weights + nan_weights = [np.nan] * 10 + with tm.assertRaises(ValueError): + o.weightby(nan_weights).sample(n=3) + + # Check np.nan are replaced by zeros. + weights_with_nan = [np.nan] * 10 + weights_with_nan[5] = 0.5 + tm.assert_frame_equal( + o.weightby(weights_with_nan, axis=0).sample(n=1), o.iloc[5:6]) + + # Check None are also replaced by zeros. + weights_with_None = [None] * 10 + weights_with_None[5] = 0.5 + tm.assert_frame_equal( + o.weightby(weights_with_None, axis=0).sample(n=1), o.iloc[5:6]) + + def test_weights_strings(sel): + # Fixes issue: 2419 + # additional specific object based tests + + # A few dataframe test with degenerate weights. + easy_weight_list = [0] * 10 + easy_weight_list[5] = 1 + + df = pd.DataFrame({'col1': range(10, 20), + 'col2': range(20, 30), + 'colString': ['a'] * 10, + 'easyweights': easy_weight_list}) + result = df.weightby('easyweights').sample(n=1) + expected = df.iloc[5:6, 0:-1] + tm.assert_frame_equal(result, expected) + + # Ensure proper error if string given as weight for Series, panel, or + # DataFrame with axis = 1. + s = Series(range(10)) + with tm.assertRaises(ValueError): + s.weightby('weight_column').sample(n=3) + + with tm.assertRaises(ValueError): + df.weightby('weight_column', axis=1).sample(n=1) + + # Check weighting key error + with tm.assertRaises(KeyError): + df.weightby('not_a_real_column_name').sample(n=3) + + # Check that re-normalizes weights that don't sum to one. 
+ weights_less_than_1 = [0] * 10 + weights_less_than_1[0] = 0.5 + tm.assert_frame_equal( + df.weightby(weights_less_than_1).sample(n=1), df.iloc[:1]) + + def test_weights_axis(sel): + + # Test axis argument + df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10}) + second_column_weight = [0, 1] + result = df.weightby(second_column_weight, axis=1).sample(n=1) + tm.assert_frame_equal(result, df[['col2']]) + + # Different axis arg types + result = df.weightby(second_column_weight, axis='columns').sample(n=1) + tm.assert_frame_equal(result, df[['col2']]) + + weight = [0] * 10 + weight[5] = 0.5 + tm.assert_frame_equal(df.weightby(weight, axis='index').sample(n=1), + df.iloc[5:6]) + + # Test weight length compared to correct axis + with tm.assertRaises(ValueError): + df.weightby([0.5] * 10, axis=1).sample(n=1) + + # Check weights with axis = 1 + easy_weight_list = [0] * 3 + easy_weight_list[2] = 1 + + df = pd.DataFrame({'col1': range(10, 20), + 'col2': range(20, 30), + 'colString': ['a'] * 10}) + result = df.weightby(easy_weight_list, axis=1).sample(n=1) + tm.assert_frame_equal(result, df[['colString']]) + + # Test that function aligns weights with frame + df = DataFrame( + {'col1': [5, 6, 7], + 'col2': ['a', 'b', 'c'], }, index=[9, 5, 3]) + s = Series([1, 0, 0], index=[3, 5, 9]) + result = df.weightby(s).sample(1) + tm.assert_frame_equal(result, df.loc[[3]]) + + # Weights have index values to be dropped because not in + # sampled DataFrame + s2 = Series([0.001, 0, 10000], index=[3, 5, 10]) + result = df.weightby(s2).sample(1) + tm.assert_frame_equal(result, df.loc[[3]]) + + # Weights have empty values to be filed with zeros + s3 = Series([0.01, 0], index=[3, 5]) + result = df.weightby(s3).sample(1) + tm.assert_frame_equal(result, df.loc[[3]]) + + # No overlap in weight and sampled DataFrame indices + s4 = Series([1, 0], index=[1, 2]) + with tm.assertRaises(ValueError): + df.weightby(s4).sample(1) + + +if __name__ == '__main__': + import nose + 
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], + exit=False) diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 26c311b4a72f8..a9468c7e5aff8 100755 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -1814,10 +1814,12 @@ def test_how_lambda_functions(self): tm.assert_series_equal(result['foo'], foo_exp) tm.assert_series_equal(result['bar'], bar_exp) + # this is a MI Series, so comparing the names of the results + # doesn't make sense result = ts.resample('M').aggregate({'foo': lambda x: x.mean(), 'bar': lambda x: x.std(ddof=1)}) - tm.assert_series_equal(result['foo'], foo_exp) - tm.assert_series_equal(result['bar'], bar_exp) + tm.assert_series_equal(result['foo'], foo_exp, check_names=False) + tm.assert_series_equal(result['bar'], bar_exp, check_names=False) def test_resample_unequal_times(self): # #1772 diff --git a/pandas/types/cast.py b/pandas/types/cast.py index 6b1c3f9c00351..8d7ba305cef1a 100644 --- a/pandas/types/cast.py +++ b/pandas/types/cast.py @@ -44,6 +44,23 @@ def _possibly_convert_platform(values): return values +def _is_nested_object(obj): + """ + return a boolean if we have a nested object, e.g. a Series with 1 or + more Series elements + + This may not be necessarily be performant. + + """ + + if isinstance(obj, ABCSeries) and is_object_dtype(obj): + + if any(isinstance(v, ABCSeries) for v in obj.values): + return True + + return False + + def _possibly_downcast_to_dtype(result, dtype): """ try to cast to the specified dtype (e.g. convert back to bool/int or could be an astype of float64->float32
closes #10030 this is on top of #14483 provides a groupby-like API to weighted calculations. The weights are lazily calculated and cached. Deprecates the ``weights`` parameter to ``.sample()``, and implements all of this logic inside ``.weightby``. TODO: - only sum/mean are implemented ATM, but logic for other ops (std, var, kurt, skew) are straightforward. - no logic ATM for a ``.groupby(...).weightby(...)`` or ``.groupby(...).rolling(...)``, but should be a straightforward enhancement. - easy enhancement to add 'other' weight calculations at some point (just adding args to the ``.weightby`` constructor), see [here](https://github.com/pandas-dev/pandas/issues/10030#issuecomment-238324993) ``` In [1]: df = DataFrame({'A': [1, 2, 3, 4], ...: 'B': [1, 2, 3, 4]}) ...: In [2]: df Out[2]: A B 0 1 1 1 2 2 2 3 3 3 4 4 In [3]: df.weightby('A').B.sum() Out[3]: 3.0 In [4]: df.weightby('A').sum() Out[4]: B 3.0 dtype: float64 In [5]: df.weightby('A').sample(n=2) Out[5]: B 3 4 1 2 In [7]: w = df.weightby('A') In [8]: w.mean() Out[8]: B 0.75 dtype: float64 In [9]: w._weights Out[9]: array([ 0.1, 0.2, 0.3, 0.4]) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/15031
2017-01-01T19:01:43Z
2017-01-02T18:36:33Z
null
2017-01-02T18:36:33Z
COMPAT: py2.7 compat for Timestamp.replace
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index a947b4f3ca0ac..bbf528a50e1bb 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -305,7 +305,7 @@ Bug Fixes - Bug in ``Series`` construction with a datetimetz (:issue:`14928`) - +- Bug in compat for passing long integers to ``Timestamp.replace`` (:issue:`15030`) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 3f4a10619f7f5..1834c56e59bb9 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -1883,6 +1883,17 @@ def test_timestamp_date_out_of_range(self): self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01']) self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)]) + def test_compat_replace(self): + # https://github.com/statsmodels/statsmodels/issues/3349 + # replace should take ints/longs for compat + + for f in [compat.long, int]: + result = date_range(Timestamp('1960-04-01 00:00:00', + freq='QS-JAN'), + periods=f(76), + freq='QS-JAN') + self.assertEqual(len(result), 76) + def test_timestamp_repr(self): # pre-1900 stamp = Timestamp('1850-01-01', tz='US/Eastern') diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 649aa22e5e3ae..9a20c36638bce 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -687,7 +687,7 @@ class Timestamp(_Timestamp): # replace def validate(k, v): """ validate integers """ - if not isinstance(v, int): + if not is_integer_object(v): raise ValueError("value must be an integer, received " "{v} for {k}".format(v=type(v), k=k)) return v
compat in Timestamp.replace when passing longs (and not ints) xref https://github.com/statsmodels/statsmodels/issues/3349
https://api.github.com/repos/pandas-dev/pandas/pulls/15030
2017-01-01T17:53:14Z
2017-01-01T18:51:24Z
2017-01-01T18:51:24Z
2017-01-01T18:51:24Z
API: allow list-like to DataFrame rename
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 1ea6662a4edb0..9dd8b2d391782 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -1586,6 +1586,25 @@ If you create an index yourself, you can just assign it to the ``index`` field: data.index = index +.. versionadded:: 0.20.0 + +A new index can also be created on an existing object by passing values +to the ``rename`` method. + +.. ipython:: python + :suppress: + + data = data.reset_index() + +.. ipython:: python + + data + data.rename(index=['a', 'b', 'c', 'd']) + data.rename(columns=['q', 'w', 'r', 't']) + + + + .. _indexing.view_versus_copy: Returning a view versus a copy diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index a947b4f3ca0ac..f0e2680ca8eeb 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -113,7 +113,7 @@ Other enhancements - ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`) - ``.select_dtypes()`` now allows the string 'datetimetz' to generically select datetimes with tz (:issue:`14910`) - +- ``pd.DataFrame.rename`` now accepts a list-like for the ``index`` and ``columns`` parameters to assign new axis values (:issue:`14829`) .. 
_whatsnew_0200.api_breaking: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d96fb094f5d5c..f3f621fdd4029 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2842,6 +2842,11 @@ def set_index(self, keys, drop=True, append=False, inplace=False, Returns ------- dataframe : DataFrame + + See Also + -------- + DataFrame.rename + """ if not isinstance(keys, list): keys = [keys] diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8ce4c4b00454b..98be360ea1762 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -581,7 +581,7 @@ def swaplevel(self, i=-2, j=-1, axis=0): ---------- %(axes)s : scalar, list-like, dict-like or function, optional Scalar or list-like will alter the ``Series.name`` attribute, - and raise on DataFrame or Panel. + for DataFrame list-like will form new values for the axes, dict-like or functions are transformations to apply to that axis' values copy : boolean, default True @@ -635,6 +635,11 @@ def swaplevel(self, i=-2, j=-1, axis=0): 0 1 4 1 2 5 2 3 6 + >>> df.rename(index=['a', 'b', 'c'], columns=[1, 2]) + 1 2 + a 1 4 + b 2 5 + c 3 6 """ @Appender(_shared_docs['rename'] % dict(axes='axes keywords for this' @@ -672,13 +677,17 @@ def f(x): # start in the axis order to eliminate too many copies for axis in lrange(self._AXIS_LEN): v = axes.get(self._AXIS_NAMES[axis]) + baxis = self._get_block_manager_axis(axis) if v is None: continue - f = _get_rename_function(v) - baxis = self._get_block_manager_axis(axis) - result._data = result._data.rename_axis(f, axis=baxis, copy=copy) - result._clear_item_cache() + f = _get_rename_function(v) + if is_list_like(f) and not callable(f): + result._set_axis(axis=baxis, labels=v) + else: + result._data = result._data.rename_axis(f, axis=baxis, + copy=copy) + result._clear_item_cache() if inplace: self._update_inplace(result._data) diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 3500ce913462a..be6092ca3a5e7 100644 --- 
a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -1504,6 +1504,30 @@ def test_to_xarray(self): expected, check_index_type=False) + def test_rename_list_like(self): + # GH 14829 + df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, columns=['a', 'b']) + + expected = df.copy() + expected.columns = ['J', 'K'] + result = df.rename(columns=['J', 'K']) + assert_frame_equal(result, expected) + + expected.index = ['a', 'b'] + for box in [list, np.array, Index]: + result = df.rename(columns=box(['J', 'K']), index=box(['a', 'b'])) + assert_frame_equal(result, expected) + + result = df.copy() + result.rename(columns=['J', 'K'], index=['a', 'b'], inplace=True) + assert_frame_equal(result, expected) + + with tm.assertRaises(ValueError): + df.rename(index=[1, 3, 3]) + + with tm.assertRaises(TypeError): + df.rename(index=1) + class TestPanel(tm.TestCase, Generic): _typ = Panel
- [x] closes #14829 - [ ] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/15029
2017-01-01T15:20:03Z
2017-01-11T01:22:40Z
null
2017-01-11T01:22:40Z
DOC: Added examples to pandas.concat documentation
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index efae7c63a9d0e..4012629aa3c90 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -1398,9 +1398,11 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, copy=True): """ Concatenate pandas objects along a particular axis with optional set logic - along the other axes. Can also add a layer of hierarchical indexing on the - concatenation axis, which may be useful if the labels are the same (or - overlapping) on the passed axis number + along the other axes. + + Can also add a layer of hierarchical indexing on the concatenation axis, + which may be useful if the labels are the same (or overlapping) on + the passed axis number. Parameters ---------- @@ -1436,13 +1438,141 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, copy : boolean, default True If False, do not copy data unnecessarily - Notes - ----- - The keys, levels, and names arguments are all optional - Returns ------- concatenated : type of objects + + Notes + ----- + The keys, levels, and names arguments are all optional. + + A walkthrough of how this method fits in with other tools for combining + panda objects can be found `here + <http://pandas.pydata.org/pandas-docs/stable/merging.html>`__. + + See Also + -------- + Series.append + DataFrame.append + DataFrame.join + DataFrame.merge + + Examples + -------- + Combine two ``Series``. + + >>> s1 = pd.Series(['a', 'b']) + >>> s2 = pd.Series(['c', 'd']) + >>> pd.concat([s1, s2]) + 0 a + 1 b + 0 c + 1 d + dtype: object + + Clear the existing index and reset it in the result + by setting the ``ignore_index`` option to ``True``. + + >>> pd.concat([s1, s2], ignore_index=True) + 0 a + 1 b + 2 c + 3 d + dtype: object + + Add a hierarchical index at the outermost level of + the data with the ``keys`` option. 
+ + >>> pd.concat([s1, s2], keys=['s1', 's2',]) + s1 0 a + 1 b + s2 0 c + 1 d + dtype: object + + Label the index keys you create with the ``names`` option. + + >>> pd.concat([s1, s2], keys=['s1', 's2'], + ... names=['Series name', 'Row ID']) + Series name Row ID + s1 0 a + 1 b + s2 0 c + 1 d + dtype: object + + Combine two ``DataFrame`` objects with identical columns. + + >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], + ... columns=['letter', 'number']) + >>> df1 + letter number + 0 a 1 + 1 b 2 + >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], + ... columns=['letter', 'number']) + >>> df2 + letter number + 0 c 3 + 1 d 4 + >>> pd.concat([df1, df2]) + letter number + 0 a 1 + 1 b 2 + 0 c 3 + 1 d 4 + + Combine ``DataFrame`` objects with overlapping columns + and return everything. Columns outside the intersection will + be filled with ``NaN`` values. + + >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], + ... columns=['letter', 'number', 'animal']) + >>> df3 + letter number animal + 0 c 3 cat + 1 d 4 dog + >>> pd.concat([df1, df3]) + animal letter number + 0 NaN a 1 + 1 NaN b 2 + 0 cat c 3 + 1 dog d 4 + + Combine ``DataFrame`` objects with overlapping columns + and return only those that are shared by passing ``inner`` to + the ``join`` keyword argument. + + >>> pd.concat([df1, df3], join="inner") + letter number + 0 a 1 + 1 b 2 + 0 c 3 + 1 d 4 + + Combine ``DataFrame`` objects horizontally along the x axis by + passing in ``axis=1``. + + >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], + ... columns=['animal', 'name']) + >>> pd.concat([df1, df4], axis=1) + letter number animal name + 0 a 1 bird polly + 1 b 2 monkey george + + Prevent the result from including duplicate index values with the + ``verify_integrity`` option. 
+ + >>> df5 = pd.DataFrame([1], index=['a']) + >>> df5 + 0 + a 1 + >>> df6 = pd.DataFrame([2], index=['a']) + >>> df6 + 0 + a 2 + >>> pd.concat([df5, df6], verify_integrity=True) + ValueError: Indexes have overlapping values: ['a'] """ op = _Concatenator(objs, axis=axis, join_axes=join_axes, ignore_index=ignore_index, join=join,
I've added two simple examples to the ``pd.concat`` documentation. I've also done a very small amount of clean up on the rest of the docstring. - [x] passes ``git diff upstream/master | flake8 --diff``
https://api.github.com/repos/pandas-dev/pandas/pulls/15028
2017-01-01T02:20:26Z
2017-01-04T13:51:35Z
2017-01-04T13:51:35Z
2017-01-04T13:51:46Z
docs cheat sheet pandas Pd to pd
diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pdf b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf index a2b222c683564..d504926d22580 100644 Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet.pdf and b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf differ diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pptx b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx index 5202256006ddf..76ae8f1e39d4e 100644 Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet.pptx and b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx differ
Change pandas syntax from Pd to pd Closes [issue 15017](https://github.com/pandas-dev/pandas/issues/15017#issuecomment-269871137)
https://api.github.com/repos/pandas-dev/pandas/pulls/15026
2016-12-31T23:13:06Z
2017-01-01T18:50:48Z
2017-01-01T18:50:48Z
2017-01-01T18:50:55Z
BUG: Patch float and uint handling in to_numeric
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 02c7ac150c6af..b0169dd8ac896 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -291,6 +291,7 @@ Bug Fixes - Bug in ``describe()`` when passing a numpy array which does not contain the median to the ``percentiles`` keyword argument (:issue:`14908`) - Bug in ``DataFrame.sort_values()`` when sorting by multiple columns where one column is of type ``int64`` and contains ``NaT`` (:issue:`14922`) - Bug in ``DataFrame.reindex()`` in which ``method`` was ignored when passing ``columns`` (:issue:`14992`) +- Bug in ``pd.to_numeric()`` in which float and unsigned integer elements were being improperly casted (:issue:`14941`, :issue:`15005`) - Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`) diff --git a/pandas/tools/tests/test_util.py b/pandas/tools/tests/test_util.py index f808abcda9418..8a8960a057926 100644 --- a/pandas/tools/tests/test_util.py +++ b/pandas/tools/tests/test_util.py @@ -426,12 +426,16 @@ def test_downcast(self): # cannot cast to an integer (signed or unsigned) # because we have a float number - data = ['1.1', 2, 3] - expected = np.array([1.1, 2, 3], dtype=np.float64) + data = (['1.1', 2, 3], + [10000.0, 20000, 3000, 40000.36, 50000, 50000.00]) + expected = (np.array([1.1, 2, 3], dtype=np.float64), + np.array([10000.0, 20000, 3000, + 40000.36, 50000, 50000.00], dtype=np.float64)) - for downcast in ('integer', 'signed', 'unsigned'): - res = pd.to_numeric(data, downcast=downcast) - tm.assert_numpy_array_equal(res, expected) + for _data, _expected in zip(data, expected): + for downcast in ('integer', 'signed', 'unsigned'): + res = pd.to_numeric(_data, downcast=downcast) + tm.assert_numpy_array_equal(res, _expected) # the smallest integer dtype need not be np.(u)int8 data = ['256', 257, 258] @@ -459,8 +463,7 @@ def test_downcast_limits(self): ('uint8', u, [iinfo(np.uint8).min, 
iinfo(np.uint8).max]), ('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]), ('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]), - # Test will be skipped until there is more uint64 support. - # ('uint64', u, [iinfo(uint64).min, iinfo(uint64).max]), + ('uint64', u, [iinfo(np.uint64).min, iinfo(np.uint64).max]), ('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]), ('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]), ('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]), @@ -469,8 +472,7 @@ def test_downcast_limits(self): ('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]), ('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]), ('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]), - # Test will be skipped until there is more uint64 support. - # ('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]), + ('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]) ] for dtype, downcast, min_max in dtype_downcast_min_max: diff --git a/pandas/tools/util.py b/pandas/tools/util.py index daecf3d093680..381e29283d417 100644 --- a/pandas/tools/util.py +++ b/pandas/tools/util.py @@ -225,7 +225,7 @@ def to_numeric(arg, errors='raise', downcast=None): if typecodes is not None: # from smallest to largest for dtype in typecodes: - if np.dtype(dtype).itemsize < values.dtype.itemsize: + if np.dtype(dtype).itemsize <= values.dtype.itemsize: values = _possibly_downcast_to_dtype( values, dtype) diff --git a/pandas/types/cast.py b/pandas/types/cast.py index ff4fb73d6a9b6..6b1c3f9c00351 100644 --- a/pandas/types/cast.py +++ b/pandas/types/cast.py @@ -101,8 +101,8 @@ def trans(x): # noqa arr = np.array([r[0]]) # if we have any nulls, then we are done - if isnull(arr).any() or not np.allclose(arr, - trans(arr).astype(dtype)): + if (isnull(arr).any() or + not np.allclose(arr, trans(arr).astype(dtype), rtol=0)): return result # a comparable, e.g. 
a Decimal may slip in here @@ -114,7 +114,7 @@ def trans(x): # noqa notnull(result).all()): new_result = trans(result).astype(dtype) try: - if np.allclose(new_result, result): + if np.allclose(new_result, result, rtol=0): return new_result except:
1) Patches `float` handling by reducing the "closeness" level when checking conversions. 2) Patches `uint` handling by allowing casts to `uint` dtypes of equal or lesser size to `int64` (when values are less than `INT64_MAX` Closes #14941. Follow-up to #15005.
https://api.github.com/repos/pandas-dev/pandas/pulls/15024
2016-12-31T10:54:57Z
2016-12-31T15:45:30Z
null
2016-12-31T18:52:43Z
BUG: Parse uint64 in read_csv
diff --git a/asv_bench/benchmarks/io_bench.py b/asv_bench/benchmarks/io_bench.py index 2ce3c4726b783..52064d2cdb8a2 100644 --- a/asv_bench/benchmarks/io_bench.py +++ b/asv_bench/benchmarks/io_bench.py @@ -128,6 +128,29 @@ def time_read_parse_dates_iso8601(self): read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo']) +class read_uint64_integers(object): + goal_time = 0.2 + + def setup(self): + self.na_values = [2**63 + 500] + + self.arr1 = np.arange(10000).astype('uint64') + 2**63 + self.data1 = '\n'.join(map(lambda x: str(x), self.arr1)) + + self.arr2 = self.arr1.copy().astype(object) + self.arr2[500] = -1 + self.data2 = '\n'.join(map(lambda x: str(x), self.arr2)) + + def time_read_uint64(self): + read_csv(StringIO(self.data1), header=None) + + def time_read_uint64_neg_values(self): + read_csv(StringIO(self.data2), header=None) + + def time_read_uint64_na_values(self): + read_csv(StringIO(self.data1), header=None, na_values=self.na_values) + + class write_csv_standard(object): goal_time = 0.2 diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index a947b4f3ca0ac..364a85f4c5947 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -288,6 +288,7 @@ Bug Fixes - Bug in ``Index`` power operations with reversed operands (:issue:`14973`) - Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`) - Bug in ``DataFrame`` construction in which unsigned 64-bit integer elements were being converted to objects (:issue:`14881`) +- Bug in ``pd.read_csv()`` in which unsigned 64-bit integer elements were being improperly converted to the wrong data types (:issue:`14983`) - Bug in ``astype()`` where ``inf`` values were incorrectly converted to integers. Now raises error now with ``astype()`` for Series and DataFrames (:issue:`14265`) - Bug in ``DataFrame(..).apply(to_numeric)`` when values are of type decimal.Decimal. 
(:issue:`14827`) - Bug in ``describe()`` when passing a numpy array which does not contain the median to the ``percentiles`` keyword argument (:issue:`14908`) diff --git a/pandas/io/tests/parser/common.py b/pandas/io/tests/parser/common.py index e694e529212aa..6a05dada05e0d 100644 --- a/pandas/io/tests/parser/common.py +++ b/pandas/io/tests/parser/common.py @@ -921,29 +921,39 @@ def test_int64_overflow(self): self.assertRaises(OverflowError, self.read_csv, StringIO(data), converters={'ID': conv}) - # These numbers fall right inside the int64 range, + # These numbers fall right inside the int64-uint64 range, # so they should be parsed as string. + ui_max = np.iinfo(np.uint64).max i_max = np.iinfo(np.int64).max i_min = np.iinfo(np.int64).min - for x in [i_max, i_min]: + for x in [i_max, i_min, ui_max]: result = self.read_csv(StringIO(str(x)), header=None) expected = DataFrame([x]) tm.assert_frame_equal(result, expected) - # These numbers fall just outside the int64 range, + # These numbers fall just outside the int64-uint64 range, # so they should be parsed as string. - too_big = i_max + 1 + too_big = ui_max + 1 too_small = i_min - 1 for x in [too_big, too_small]: result = self.read_csv(StringIO(str(x)), header=None) - if self.engine == 'python' and x == too_big: - expected = DataFrame([x]) - else: - expected = DataFrame([str(x)]) + expected = DataFrame([str(x)]) tm.assert_frame_equal(result, expected) + # No numerical dtype can hold both negative and uint64 values, + # so they should be cast as string. 
+ data = '-1\n' + str(2**63) + expected = DataFrame([str(-1), str(2**63)]) + result = self.read_csv(StringIO(data), header=None) + tm.assert_frame_equal(result, expected) + + data = str(2**63) + '\n-1' + expected = DataFrame([str(2**63), str(-1)]) + result = self.read_csv(StringIO(data), header=None) + tm.assert_frame_equal(result, expected) + def test_empty_with_nrows_chunksize(self): # see gh-9535 expected = DataFrame([], columns=['foo', 'bar']) diff --git a/pandas/io/tests/parser/dtypes.py b/pandas/io/tests/parser/dtypes.py index b9ab79c3b9d54..abcd14e9499cb 100644 --- a/pandas/io/tests/parser/dtypes.py +++ b/pandas/io/tests/parser/dtypes.py @@ -275,3 +275,11 @@ def test_empty_dtype(self): result = self.read_csv(StringIO(data), header=0, dtype={'a': np.int32, 1: np.float64}) tm.assert_frame_equal(result, expected) + + def test_numeric_dtype(self): + data = '0\n1' + + for dt in np.typecodes['AllInteger'] + np.typecodes['Float']: + expected = pd.DataFrame([0, 1], dtype=dt) + result = self.read_csv(StringIO(data), header=None, dtype=dt) + tm.assert_frame_equal(expected, result) diff --git a/pandas/io/tests/parser/na_values.py b/pandas/io/tests/parser/na_values.py index e245bc5589145..2cbd7cdedf2ab 100644 --- a/pandas/io/tests/parser/na_values.py +++ b/pandas/io/tests/parser/na_values.py @@ -289,3 +289,17 @@ def test_na_values_dict_col_index(self): out = self.read_csv(StringIO(data), na_values=na_values) expected = DataFrame({'a': [np.nan, 1]}) tm.assert_frame_equal(out, expected) + + def test_na_values_uint64(self): + # see gh-14983 + + na_values = [2**63] + data = str(2**63) + '\n' + str(2**63 + 1) + expected = DataFrame([str(2**63), str(2**63 + 1)]) + out = self.read_csv(StringIO(data), header=None, na_values=na_values) + tm.assert_frame_equal(out, expected) + + data = str(2**63) + ',1' + '\n,2' + expected = DataFrame([[str(2**63), 1], ['', 2]]) + out = self.read_csv(StringIO(data), header=None) + tm.assert_frame_equal(out, expected) diff --git 
a/pandas/parser.pyx b/pandas/parser.pyx index 2464ee15b36b7..c5082e999d19c 100644 --- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -193,6 +193,14 @@ cdef extern from "parser/tokenizer.h": int *line_start int col + ctypedef struct uint_state: + int seen_sint + int seen_uint + int seen_null + + void uint_state_init(uint_state *self) + int uint64_conflict(uint_state *self) + void coliter_setup(coliter_t *it, parser_t *parser, int i, int start) nogil void COLITER_NEXT(coliter_t, const char *) nogil @@ -217,7 +225,8 @@ cdef extern from "parser/tokenizer.h": int64_t str_to_int64(char *p_item, int64_t int_min, int64_t int_max, int *error, char tsep) nogil -# uint64_t str_to_uint64(char *p_item, uint64_t uint_max, int *error) + uint64_t str_to_uint64(uint_state *state, char *p_item, int64_t int_max, + uint64_t uint_max, int *error, char tsep) nogil double xstrtod(const char *p, char **q, char decimal, char sci, char tsep, int skip_trailing) nogil @@ -1127,6 +1136,14 @@ cdef class TextReader: try: col_res, na_count = self._convert_with_dtype( dt, i, start, end, na_filter, 0, na_hashset, na_flist) + except ValueError: + # This error is raised from trying to convert to uint64, + # and we discover that we cannot convert to any numerical + # dtype successfully. As a result, we leave the data + # column AS IS with object dtype. 
+ col_res, na_count = self._convert_with_dtype( + np.dtype('object'), i, start, end, 0, + 0, na_hashset, na_flist) except OverflowError: col_res, na_count = self._convert_with_dtype( np.dtype('object'), i, start, end, na_filter, @@ -1164,12 +1181,17 @@ cdef class TextReader: kh_str_t *na_hashset, object na_flist): if is_integer_dtype(dtype): - result, na_count = _try_int64(self.parser, i, start, - end, na_filter, na_hashset) - if user_dtype and na_count is not None: - if na_count > 0: - raise ValueError("Integer column has NA values in " - "column {column}".format(column=i)) + try: + result, na_count = _try_int64(self.parser, i, start, + end, na_filter, na_hashset) + if user_dtype and na_count is not None: + if na_count > 0: + raise ValueError("Integer column has NA values in " + "column {column}".format(column=i)) + except OverflowError: + result = _try_uint64(self.parser, i, start, end, + na_filter, na_hashset) + na_count = 0 if result is not None and dtype != 'int64': result = result.astype(dtype) @@ -1750,6 +1772,78 @@ cdef inline int _try_double_nogil(parser_t *parser, int col, return 0 +cdef _try_uint64(parser_t *parser, int col, int line_start, int line_end, + bint na_filter, kh_str_t *na_hashset): + cdef: + int error + size_t i, lines + coliter_t it + uint64_t *data + ndarray result + khiter_t k + uint_state state + + lines = line_end - line_start + result = np.empty(lines, dtype=np.uint64) + data = <uint64_t *> result.data + + uint_state_init(&state) + coliter_setup(&it, parser, col, line_start) + with nogil: + error = _try_uint64_nogil(parser, col, line_start, line_end, + na_filter, na_hashset, data, &state) + if error != 0: + if error == ERROR_OVERFLOW: + # Can't get the word variable + raise OverflowError('Overflow') + return None + + if uint64_conflict(&state): + raise ValueError('Cannot convert to numerical dtype') + + if state.seen_sint: + raise OverflowError('Overflow') + + return result + +cdef inline int _try_uint64_nogil(parser_t *parser, int 
col, int line_start, + int line_end, bint na_filter, + const kh_str_t *na_hashset, + uint64_t *data, uint_state *state) nogil: + cdef: + int error + size_t i + size_t lines = line_end - line_start + coliter_t it + const char *word = NULL + khiter_t k + + coliter_setup(&it, parser, col, line_start) + + if na_filter: + for i in range(lines): + COLITER_NEXT(it, word) + k = kh_get_str(na_hashset, word) + # in the hash table + if k != na_hashset.n_buckets: + state.seen_null = 1 + data[i] = 0 + continue + + data[i] = str_to_uint64(state, word, INT64_MAX, UINT64_MAX, + &error, parser.thousands) + if error != 0: + return error + else: + for i in range(lines): + COLITER_NEXT(it, word) + data[i] = str_to_uint64(state, word, INT64_MAX, UINT64_MAX, + &error, parser.thousands) + if error != 0: + return error + + return 0 + cdef _try_int64(parser_t *parser, int col, int line_start, int line_end, bint na_filter, kh_str_t *na_hashset): cdef: diff --git a/pandas/src/parser/tokenizer.c b/pandas/src/parser/tokenizer.c index 1ea62d66345bd..bc729cd3e7453 100644 --- a/pandas/src/parser/tokenizer.c +++ b/pandas/src/parser/tokenizer.c @@ -1757,6 +1757,16 @@ double round_trip(const char *p, char **q, char decimal, char sci, char tsep, // End of xstrtod code // --------------------------------------------------------------------------- +void uint_state_init(uint_state *self) { + self->seen_sint = 0; + self->seen_uint = 0; + self->seen_null = 0; +} + +int uint64_conflict(uint_state *self) { + return self->seen_uint && (self->seen_sint || self->seen_null); +} + int64_t str_to_int64(const char *p_item, int64_t int_min, int64_t int_max, int *error, char tsep) { const char *p = (const char *)p_item; @@ -1876,3 +1886,88 @@ int64_t str_to_int64(const char *p_item, int64_t int_min, int64_t int_max, *error = 0; return number; } + +uint64_t str_to_uint64(uint_state *state, const char *p_item, int64_t int_max, + uint64_t uint_max, int *error, char tsep) { + const char *p = (const char *)p_item; + 
uint64_t pre_max = uint_max / 10; + int dig_pre_max = uint_max % 10; + uint64_t number = 0; + int d; + + // Skip leading spaces. + while (isspace(*p)) { + ++p; + } + + // Handle sign. + if (*p == '-') { + state->seen_sint = 1; + *error = 0; + return 0; + } else if (*p == '+') { + p++; + } + + // Check that there is a first digit. + if (!isdigit(*p)) { + // Error... + *error = ERROR_NO_DIGITS; + return 0; + } + + // If number is less than pre_max, at least one more digit + // can be processed without overflowing. + // + // Process the digits. + d = *p; + if (tsep != '\0') { + while (1) { + if (d == tsep) { + d = *++p; + continue; + } else if (!isdigit(d)) { + break; + } + if ((number < pre_max) || + ((number == pre_max) && (d - '0' <= dig_pre_max))) { + number = number * 10 + (d - '0'); + d = *++p; + + } else { + *error = ERROR_OVERFLOW; + return 0; + } + } + } else { + while (isdigit(d)) { + if ((number < pre_max) || + ((number == pre_max) && (d - '0' <= dig_pre_max))) { + number = number * 10 + (d - '0'); + d = *++p; + + } else { + *error = ERROR_OVERFLOW; + return 0; + } + } + } + + // Skip trailing spaces. + while (isspace(*p)) { + ++p; + } + + // Did we use up all the characters? 
+ if (*p) { + *error = ERROR_INVALID_CHARS; + return 0; + } + + if (number > int_max) { + state->seen_uint = 1; + } + + *error = 0; + return number; +} diff --git a/pandas/src/parser/tokenizer.h b/pandas/src/parser/tokenizer.h index e01812f1c5520..d31bf4b688c58 100644 --- a/pandas/src/parser/tokenizer.h +++ b/pandas/src/parser/tokenizer.h @@ -25,7 +25,6 @@ See LICENSE for the license #define ERROR_NO_DIGITS 1 #define ERROR_OVERFLOW 2 #define ERROR_INVALID_CHARS 3 -#define ERROR_MINUS_SIGN 4 #include "../headers/stdint.h" @@ -250,6 +249,18 @@ int tokenize_all_rows(parser_t *self); // Have parsed / type-converted a chunk of data // and want to free memory from the token stream +typedef struct uint_state { + int seen_sint; + int seen_uint; + int seen_null; +} uint_state; + +void uint_state_init(uint_state *self); + +int uint64_conflict(uint_state *self); + +uint64_t str_to_uint64(uint_state *state, const char *p_item, int64_t int_max, + uint64_t uint_max, int *error, char tsep); int64_t str_to_int64(const char *p_item, int64_t int_min, int64_t int_max, int *error, char tsep); double xstrtod(const char *p, char **q, char decimal, char sci, char tsep,
Adds behavior to allow for parsing of `uint64` data in `read_csv`. Also ensures that they are properly handled along with `NaN` and negative values. Closes #14983.
https://api.github.com/repos/pandas-dev/pandas/pulls/15020
2016-12-31T10:11:45Z
2017-01-02T19:50:02Z
2017-01-02T19:50:02Z
2017-01-02T19:56:19Z
MAINT: Refactor logic in maybe_convert_*
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index d1e264b06eccc..933fc8fb1cc9b 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -85,6 +85,128 @@ try: except AttributeError: pass + +cdef class Seen(object): + """ + Class for keeping track of the types of elements + encountered when trying to perform type conversions. + """ + + cdef public: + bint int_ # seen_int + bint bool_ # seen_bool + bint null_ # seen_null + bint uint_ # seen_uint (unsigned integer) + bint sint_ # seen_sint (signed integer) + bint float_ # seen_float + bint object_ # seen_object + bint complex_ # seen_complex + bint datetime_ # seen_datetime + bint coerce_numeric # coerce data to numeric + bint timedelta_ # seen_timedelta + bint datetimetz_ # seen_datetimetz + + def __cinit__(self, bint coerce_numeric=0): + """ + Initialize a Seen instance. + + Parameters + ---------- + coerce_numeric : bint, default 0 + Whether or not to force conversion to a numeric data type if + initial methods to convert to numeric fail. + """ + self.int_ = 0 + self.bool_ = 0 + self.null_ = 0 + self.uint_ = 0 + self.sint_ = 0 + self.float_ = 0 + self.object_ = 0 + self.complex_ = 0 + self.datetime_ = 0 + self.timedelta_ = 0 + self.datetimetz_ = 0 + self.coerce_numeric = coerce_numeric + + cdef inline bint check_uint64_conflict(self) except -1: + """ + Check whether we can safely convert a uint64 array to a numeric dtype. + + There are two cases when conversion to numeric dtype with a uint64 + array is not safe (and will therefore not be performed) + + 1) A NaN element is encountered. + + uint64 cannot be safely cast to float64 due to truncation issues + at the extreme ends of the range. + + 2) A negative number is encountered. + + There is no numerical dtype that can hold both negative numbers + and numbers greater than INT64_MAX. Hence, at least one number + will be improperly cast if we convert to a numeric dtype. 
+ + Returns + ------- + return_values : bool + Whether or not we should return the original input array to avoid + data truncation. + + Raises + ------ + ValueError : uint64 elements were detected, and at least one of the + two conflict cases was also detected. However, we are + trying to force conversion to a numeric dtype. + """ + if self.uint_ and (self.null_ or self.sint_): + if not self.coerce_numeric: + return True + + if self.null_: + msg = ("uint64 array detected, and such an " + "array cannot contain NaN.") + else: # self.sint_ = 1 + msg = ("uint64 and negative values detected. " + "Cannot safely return a numeric array " + "without truncating data.") + + raise ValueError(msg) + return False + + cdef inline saw_null(self): + """ + Set flags indicating that a null value was encountered. + """ + self.null_ = 1 + self.float_ = 1 + + def saw_int(self, val): + """ + Set flags indicating that an integer value was encountered. + + Parameters + ---------- + val : Python int + Value with which to set the flags. + """ + self.int_ = 1 + self.sint_ = self.sint_ or (val < 0) + self.uint_ = self.uint_ or (val > iINT64_MAX) + + @property + def numeric_(self): + return self.complex_ or self.float_ or self.int_ + + @property + def is_bool(self): + return not (self.datetime_ or self.numeric_ or self.timedelta_) + + @property + def is_float_or_complex(self): + return not (self.bool_ or self.datetime_ or self.timedelta_) + + cdef _try_infer_map(v): """ if its in our map, just return the dtype """ cdef: @@ -629,56 +751,6 @@ cdef int64_t iINT64_MIN = <int64_t> INT64_MIN cdef uint64_t iUINT64_MAX = <uint64_t> UINT64_MAX -cdef inline bint _check_uint64_nan(bint seen_uint, bint seen_null, - bint coerce_numeric) except -1: - """ - Check whether we have encountered uint64 when handling a NaN element. - - If uint64 has been encountered, we cannot safely cast to float64 due - to truncation problems (this would occur if we return a numeric array - containing a NaN element). 
- - Returns - ------- - return_values : bool - Whether or not we should return the original input array to avoid - data truncation. - """ - if seen_null and seen_uint: - if not coerce_numeric: - return True - else: - raise ValueError("uint64 array detected, and such an " - "array cannot contain NaN.") - - return False - - -cdef inline bint _check_uint64_int64_conflict(bint seen_sint, bint seen_uint, - bint coerce_numeric) except -1: - """ - Check whether we have encountered both int64 and uint64 elements. - - If both have been encountered, we cannot safely cast to an integer - dtype since none is large enough to hold both types of elements. - - Returns - ------- - return_values : bool - Whether or not we should return the original input array to avoid - data truncation. - """ - if seen_sint and seen_uint: - if not coerce_numeric: - return True - else: - raise ValueError("uint64 and negative values detected. " - "Cannot safely return a numeric array " - "without truncating data.") - - return False - - def maybe_convert_numeric(ndarray[object] values, set na_values, bint convert_empty=True, bint coerce_numeric=False): """ @@ -712,18 +784,12 @@ def maybe_convert_numeric(ndarray[object] values, set na_values, cdef: int status, maybe_int Py_ssize_t i, n = values.size + Seen seen = Seen(coerce_numeric); ndarray[float64_t] floats = np.empty(n, dtype='f8') ndarray[complex128_t] complexes = np.empty(n, dtype='c16') ndarray[int64_t] ints = np.empty(n, dtype='i8') ndarray[uint64_t] uints = np.empty(n, dtype='u8') ndarray[uint8_t] bools = np.empty(n, dtype='u1') - bint seen_null = False - bint seen_uint = False - bint seen_sint = False - bint seen_float = False - bint seen_complex = False - bint seen_int = False - bint seen_bool = False object val float64_t fval @@ -731,88 +797,52 @@ def maybe_convert_numeric(ndarray[object] values, set na_values, val = values[i] if val.__hash__ is not None and val in na_values: - seen_null = True - if _check_uint64_nan(seen_uint, seen_null, - 
coerce_numeric): - return values - + seen.saw_null() floats[i] = complexes[i] = nan - seen_float = True elif util.is_float_object(val): if val != val: - seen_null = True - if _check_uint64_nan(seen_uint, seen_null, - coerce_numeric): - return values + seen.null_ = True floats[i] = complexes[i] = val - seen_float = True + seen.float_ = True elif util.is_integer_object(val): floats[i] = complexes[i] = val - as_int = int(val) - seen_int = True - - seen_uint = seen_uint or (as_int > iINT64_MAX) - seen_sint = seen_sint or (as_int < 0) - if (_check_uint64_nan(seen_uint, seen_null, coerce_numeric) or - _check_uint64_int64_conflict(seen_sint, seen_uint, - coerce_numeric)): - return values + as_int = int(val) + seen.saw_int(as_int) - if seen_uint: - uints[i] = as_int - elif seen_sint: - ints[i] = as_int - else: + if as_int >= 0: uints[i] = as_int + if as_int <= iINT64_MAX: ints[i] = as_int elif util.is_bool_object(val): floats[i] = uints[i] = ints[i] = bools[i] = val - seen_bool = True + seen.bool_ = True elif val is None: - seen_null = True - if _check_uint64_nan(seen_uint, seen_null, - coerce_numeric): - return values - + seen.saw_null() floats[i] = complexes[i] = nan - seen_float = True elif hasattr(val, '__len__') and len(val) == 0: - if convert_empty or coerce_numeric: - seen_null = True - if _check_uint64_nan(seen_uint, seen_null, - coerce_numeric): - return values - + if convert_empty or seen.coerce_numeric: + seen.saw_null() floats[i] = complexes[i] = nan - seen_float = True else: raise ValueError('Empty string encountered') elif util.is_complex_object(val): complexes[i] = val - seen_complex = True + seen.complex_ = True elif is_decimal(val): floats[i] = complexes[i] = val - seen_float = True + seen.float_ = True else: try: status = floatify(val, &fval, &maybe_int) if fval in na_values: - seen_null = True - if _check_uint64_nan(seen_uint, seen_null, - coerce_numeric): - return values - + seen.saw_null() floats[i] = complexes[i] = nan - seen_float = True else: if 
fval != fval: - seen_null = True - if _check_uint64_nan(seen_uint, seen_null, - coerce_numeric): - return values + seen.null_ = True floats[i] = fval @@ -820,57 +850,43 @@ def maybe_convert_numeric(ndarray[object] values, set na_values, as_int = int(val) if as_int in na_values: - seen_float = True - seen_null = True + seen.saw_null() else: - seen_uint = seen_uint or (as_int > iINT64_MAX) - seen_sint = seen_sint or (as_int < 0) - seen_int = True - - if (_check_uint64_nan(seen_uint, seen_null, - coerce_numeric) or - _check_uint64_int64_conflict(seen_sint, seen_uint, - coerce_numeric)): - return values + seen.saw_int(as_int) - if not (seen_float or as_int in na_values): + if not (seen.float_ or as_int in na_values): if as_int < iINT64_MIN or as_int > iUINT64_MAX: raise ValueError('Integer out of range.') - if seen_uint: - uints[i] = as_int - elif seen_sint: - ints[i] = as_int - else: + if as_int >= 0: uints[i] = as_int + if as_int <= iINT64_MAX: ints[i] = as_int else: - seen_float = True + seen.float_ = True except (TypeError, ValueError) as e: - if not coerce_numeric: + if not seen.coerce_numeric: raise type(e)(str(e) + ' at position {}'.format(i)) elif "uint64" in str(e): # Exception from check functions. 
raise - seen_null = True - if _check_uint64_nan(seen_uint, seen_null, - coerce_numeric): - return values - + seen.saw_null() floats[i] = nan - seen_float = True - if seen_complex: + if seen.check_uint64_conflict(): + return values + + if seen.complex_: return complexes - elif seen_float: + elif seen.float_: return floats - elif seen_int: - if seen_uint: + elif seen.int_: + if seen.uint_: return uints else: return ints - elif seen_bool: + elif seen.bool_: return bools.view(np.bool_) - elif seen_uint: + elif seen.uint_: return uints return ints @@ -890,18 +906,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, ndarray[uint8_t] bools ndarray[int64_t] idatetimes ndarray[int64_t] itimedeltas - bint seen_float = 0 - bint seen_complex = 0 - bint seen_datetime = 0 - bint seen_datetimetz = 0 - bint seen_timedelta = 0 - bint seen_int = 0 - bint seen_uint = 0 - bint seen_sint = 0 - bint seen_bool = 0 - bint seen_object = 0 - bint seen_null = 0 - bint seen_numeric = 0 + Seen seen = Seen(); object val, onan float64_t fval, fnan @@ -928,54 +933,52 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, val = objects[i] if val is None: - seen_null = 1 + seen.null_ = 1 floats[i] = complexes[i] = fnan elif val is NaT: if convert_datetime: idatetimes[i] = iNaT - seen_datetime = 1 + seen.datetime_ = 1 if convert_timedelta: itimedeltas[i] = iNaT - seen_timedelta = 1 + seen.timedelta_ = 1 if not (convert_datetime or convert_timedelta): - seen_object = 1 + seen.object_ = 1 elif util.is_bool_object(val): - seen_bool = 1 + seen.bool_ = 1 bools[i] = val elif util.is_float_object(val): floats[i] = complexes[i] = val - seen_float = 1 + seen.float_ = 1 elif util.is_datetime64_object(val): if convert_datetime: idatetimes[i] = convert_to_tsobject( val, None, None, 0, 0).value - seen_datetime = 1 + seen.datetime_ = 1 else: - seen_object = 1 - # objects[i] = val.astype('O') + seen.object_ = 1 break elif is_timedelta(val): if convert_timedelta: itimedeltas[i] 
= convert_to_timedelta64(val, 'ns') - seen_timedelta = 1 + seen.timedelta_ = 1 else: - seen_object = 1 + seen.object_ = 1 break elif util.is_integer_object(val): - seen_int = 1 + seen.int_ = 1 floats[i] = <float64_t> val complexes[i] = <double complex> val - if not seen_null: - seen_uint = seen_uint or (int(val) > iINT64_MAX) - seen_sint = seen_sint or (val < 0) + if not seen.null_: + seen.saw_int(int(val)) - if seen_uint and seen_sint: - seen_object = 1 + if seen.uint_ and seen.sint_: + seen.object_ = 1 break - if seen_uint: + if seen.uint_: uints[i] = val - elif seen_sint: + elif seen.sint_: ints[i] = val else: uints[i] = val @@ -983,106 +986,101 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, elif util.is_complex_object(val): complexes[i] = val - seen_complex = 1 + seen.complex_ = 1 elif PyDateTime_Check(val) or util.is_datetime64_object(val): # if we have an tz's attached then return the objects if convert_datetime: if getattr(val, 'tzinfo', None) is not None: - seen_datetimetz = 1 + seen.datetimetz_ = 1 break else: - seen_datetime = 1 + seen.datetime_ = 1 idatetimes[i] = convert_to_tsobject( val, None, None, 0, 0).value else: - seen_object = 1 + seen.object_ = 1 break elif try_float and not util.is_string_object(val): # this will convert Decimal objects try: floats[i] = float(val) complexes[i] = complex(val) - seen_float = 1 + seen.float_ = 1 except Exception: - seen_object = 1 + seen.object_ = 1 break else: - seen_object = 1 + seen.object_ = 1 break - seen_numeric = seen_complex or seen_float or seen_int - # we try to coerce datetime w/tz but must all have the same tz - if seen_datetimetz: + if seen.datetimetz_: if len(set([getattr(val, 'tzinfo', None) for val in objects])) == 1: from pandas import DatetimeIndex return DatetimeIndex(objects) - seen_object = 1 - - if not seen_object: + seen.object_ = 1 + if not seen.object_: if not safe: - if seen_null: - if not seen_bool and not seen_datetime and not seen_timedelta: - if seen_complex: 
+ if seen.null_: + if seen.is_float_or_complex: + if seen.complex_: return complexes - elif seen_float or seen_int: + elif seen.float_ or seen.int_: return floats else: - if not seen_bool: - if seen_datetime: - if not seen_numeric: + if not seen.bool_: + if seen.datetime_: + if not seen.numeric_: return datetimes - elif seen_timedelta: - if not seen_numeric: + elif seen.timedelta_: + if not seen.numeric_: return timedeltas else: - if seen_complex: + if seen.complex_: return complexes - elif seen_float: + elif seen.float_: return floats - elif seen_int: - if seen_uint: + elif seen.int_: + if seen.uint_: return uints else: return ints - elif (not seen_datetime and not seen_numeric - and not seen_timedelta): + elif seen.is_bool: return bools.view(np.bool_) else: # don't cast int to float, etc. - if seen_null: - if not seen_bool and not seen_datetime and not seen_timedelta: - if seen_complex: - if not seen_int: + if seen.null_: + if seen.is_float_or_complex: + if seen.complex_: + if not seen.int_: return complexes - elif seen_float: - if not seen_int: + elif seen.float_: + if not seen.int_: return floats else: - if not seen_bool: - if seen_datetime: - if not seen_numeric: + if not seen.bool_: + if seen.datetime_: + if not seen.numeric_: return datetimes - elif seen_timedelta: - if not seen_numeric: + elif seen.timedelta_: + if not seen.numeric_: return timedeltas else: - if seen_complex: - if not seen_int: + if seen.complex_: + if not seen.int_: return complexes - elif seen_float: - if not seen_int: + elif seen.float_: + if not seen.int_: return floats - elif seen_int: - if seen_uint: + elif seen.int_: + if seen.uint_: return uints else: return ints - elif (not seen_datetime and not seen_numeric - and not seen_timedelta): + elif seen.is_bool: return bools.view(np.bool_) return objects diff --git a/pandas/tests/types/test_inference.py b/pandas/tests/types/test_inference.py index 796c77354d6f8..8180757d9e706 100644 --- a/pandas/tests/types/test_inference.py +++ 
b/pandas/tests/types/test_inference.py @@ -301,7 +301,6 @@ def test_convert_numeric_int64_uint64(self): for case in cases: if coerce: with tm.assertRaisesRegexp(ValueError, msg): - print(case) lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce) else:
Creates a Seen class to abstract objects encountered when doing type conversions. Follow-up to #15005.
https://api.github.com/repos/pandas-dev/pandas/pulls/15018
2016-12-30T23:14:36Z
2017-01-03T11:24:59Z
null
2017-01-03T18:22:55Z
TST: Series Division with zeros numpy array (#8674)
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 086946d05d7a6..6650a171b818b 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -143,6 +143,19 @@ def test_div(self): expected = Series([-inf, nan, inf]) assert_series_equal(result, expected) + # GH 8674 + zero_array = np.array([0] * 5) + data = np.random.randn(5) + expected = pd.Series([0.] * 5) + result = zero_array / pd.Series(data) + assert_series_equal(result, expected) + + result = pd.Series(zero_array) / data + assert_series_equal(result, expected) + + result = pd.Series(zero_array) / pd.Series(data) + assert_series_equal(result, expected) + def test_operators(self): def _check_op(series, other, op, pos_only=False, check_dtype=True):
- [x] closes #8674 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` This issue operates normally on master and relates to Python's 2.x division. Doesn't appear that any PRs in 0.20 addressed this issue.
https://api.github.com/repos/pandas-dev/pandas/pulls/15013
2016-12-30T07:35:45Z
2016-12-30T09:39:39Z
2016-12-30T09:39:39Z
2017-12-20T02:04:02Z
DOC/BLD: doc building with python 3
diff --git a/.travis.yml b/.travis.yml index becb347c3700f..b2a1a8a63cfe6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -182,9 +182,9 @@ matrix: - CACHE_NAME="35_ascii" - USE_CACHE=true # In allow_failures - - python: 2.7 + - python: 3.5 env: - - PYTHON_VERSION=2.7 + - PYTHON_VERSION=3.5 - JOB_NAME: "doc_build" - FULL_DEPS=true - DOC_BUILD=true @@ -275,9 +275,9 @@ matrix: - LOCALE_OVERRIDE="C" - CACHE_NAME="35_ascii" - USE_CACHE=true - - python: 2.7 + - python: 3.5 env: - - PYTHON_VERSION=2.7 + - PYTHON_VERSION=3.5 - JOB_NAME: "doc_build" - FULL_DEPS=true - DOC_BUILD=true diff --git a/ci/requirements-2.7_DOC_BUILD.build b/ci/requirements-3.5_DOC_BUILD.build similarity index 52% rename from ci/requirements-2.7_DOC_BUILD.build rename to ci/requirements-3.5_DOC_BUILD.build index faf1e3559f7f1..9558cf00ddf5c 100644 --- a/ci/requirements-2.7_DOC_BUILD.build +++ b/ci/requirements-3.5_DOC_BUILD.build @@ -1,4 +1,4 @@ -dateutil +python-dateutil pytz numpy cython diff --git a/ci/requirements-2.7_DOC_BUILD.run b/ci/requirements-3.5_DOC_BUILD.run similarity index 92% rename from ci/requirements-2.7_DOC_BUILD.run rename to ci/requirements-3.5_DOC_BUILD.run index 9da8e76bc7e22..644a16f51f4b6 100644 --- a/ci/requirements-2.7_DOC_BUILD.run +++ b/ci/requirements-3.5_DOC_BUILD.run @@ -7,7 +7,7 @@ notebook matplotlib scipy lxml -beautiful-soup +beautifulsoup4 html5lib pytables openpyxl=1.8.5 diff --git a/ci/requirements-2.7_DOC_BUILD.sh b/ci/requirements-3.5_DOC_BUILD.sh similarity index 100% rename from ci/requirements-2.7_DOC_BUILD.sh rename to ci/requirements-3.5_DOC_BUILD.sh diff --git a/doc/_templates/autosummary/accessor.rst b/doc/_templates/autosummary/accessor.rst index 1401121fb51c6..4ba745cd6fdba 100644 --- a/doc/_templates/autosummary/accessor.rst +++ b/doc/_templates/autosummary/accessor.rst @@ -3,4 +3,4 @@ .. currentmodule:: {{ module.split('.')[0] }} -.. automethod:: {{ [module.split('.')[1], objname]|join('.') }} +.. 
autoaccessor:: {{ (module.split('.')[1:] + [objname]) | join('.') }} diff --git a/doc/_templates/autosummary/accessor_attribute.rst b/doc/_templates/autosummary/accessor_attribute.rst index a2f0eb5e068c4..b5ad65d6a736f 100644 --- a/doc/_templates/autosummary/accessor_attribute.rst +++ b/doc/_templates/autosummary/accessor_attribute.rst @@ -3,4 +3,4 @@ .. currentmodule:: {{ module.split('.')[0] }} -.. autoaccessorattribute:: {{ [module.split('.')[1], objname]|join('.') }} +.. autoaccessorattribute:: {{ (module.split('.')[1:] + [objname]) | join('.') }} diff --git a/doc/_templates/autosummary/accessor_callable.rst b/doc/_templates/autosummary/accessor_callable.rst index 6f45e0fd01e16..7a3301814f5f4 100644 --- a/doc/_templates/autosummary/accessor_callable.rst +++ b/doc/_templates/autosummary/accessor_callable.rst @@ -3,4 +3,4 @@ .. currentmodule:: {{ module.split('.')[0] }} -.. autoaccessorcallable:: {{ [module.split('.')[1], objname]|join('.') }}.__call__ +.. autoaccessorcallable:: {{ (module.split('.')[1:] + [objname]) | join('.') }}.__call__ diff --git a/doc/_templates/autosummary/accessor_method.rst b/doc/_templates/autosummary/accessor_method.rst index 43dfc3b813120..aefbba6ef1bbc 100644 --- a/doc/_templates/autosummary/accessor_method.rst +++ b/doc/_templates/autosummary/accessor_method.rst @@ -3,4 +3,4 @@ .. currentmodule:: {{ module.split('.')[0] }} -.. autoaccessormethod:: {{ [module.split('.')[1], objname]|join('.') }} +.. autoaccessormethod:: {{ (module.split('.')[1:] + [objname]) | join('.') }} diff --git a/doc/source/conf.py b/doc/source/conf.py index 4f679f3f728bf..1e82dfca87d17 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -326,6 +326,23 @@ from sphinx.ext.autosummary import Autosummary +class AccessorDocumenter(MethodDocumenter): + """ + Specialized Documenter subclass for accessors. 
+ """ + + objtype = 'accessor' + directivetype = 'method' + + # lower than MethodDocumenter so this is not chosen for normal methods + priority = 0.6 + + def format_signature(self): + # this method gives an error/warning for the accessors, therefore + # overriding it (accessor has no arguments) + return '' + + class AccessorLevelDocumenter(Documenter): """ Specialized Documenter subclass for objects on accessor level (methods, @@ -381,12 +398,17 @@ class AccessorAttributeDocumenter(AccessorLevelDocumenter, AttributeDocumenter): objtype = 'accessorattribute' directivetype = 'attribute' + # lower than AttributeDocumenter so this is not chosen for normal attributes + priority = 0.6 class AccessorMethodDocumenter(AccessorLevelDocumenter, MethodDocumenter): objtype = 'accessormethod' directivetype = 'method' + # lower than MethodDocumenter so this is not chosen for normal methods + priority = 0.6 + class AccessorCallableDocumenter(AccessorLevelDocumenter, MethodDocumenter): """ @@ -483,6 +505,7 @@ def remove_flags_docstring(app, what, name, obj, options, lines): def setup(app): app.connect("autodoc-process-docstring", remove_flags_docstring) + app.add_autodocumenter(AccessorDocumenter) app.add_autodocumenter(AccessorAttributeDocumenter) app.add_autodocumenter(AccessorMethodDocumenter) app.add_autodocumenter(AccessorCallableDocumenter) diff --git a/doc/sphinxext/ipython_sphinxext/ipython_directive.py b/doc/sphinxext/ipython_sphinxext/ipython_directive.py index b8b0935cd5b96..49fbacba99592 100644 --- a/doc/sphinxext/ipython_sphinxext/ipython_directive.py +++ b/doc/sphinxext/ipython_sphinxext/ipython_directive.py @@ -786,7 +786,7 @@ def setup(self): # EmbeddedSphinxShell is created, its interactive shell member # is the same for each instance. - if mplbackend: + if mplbackend and 'matplotlib.backends' not in sys.modules: import matplotlib # Repeated calls to use() will not hurt us since `mplbackend` # is the same each time.
Makes our doc building machinery (specific accessor stuff) python 3 compat + switches travis doc building to python 3.5 + preventing some other warnings (see individual commits)
https://api.github.com/repos/pandas-dev/pandas/pulls/15012
2016-12-29T22:23:39Z
2016-12-30T14:46:18Z
2016-12-30T14:46:18Z
2017-03-05T11:48:08Z
DOC: python 3 compatibility in code examples
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index 090998570a358..86d0c61398be1 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -129,8 +129,7 @@ To get back to the original Series or `numpy` array, use ``Series.astype(origina s s2 = s.astype('category') s2 - s3 = s2.astype('string') - s3 + s2.astype(str) np.asarray(s2) If you have already `codes` and `categories`, you can use the :func:`~pandas.Categorical.from_codes` diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index a9d0ab5476b66..d2df72b284a12 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -7,6 +7,7 @@ import pandas as pd import numpy as np + from pandas.compat import StringIO import random import os @@ -985,9 +986,6 @@ Skip row between header and data .. ipython:: python - from io import StringIO - import pandas as pd - data = """;;;; ;;;; ;;;; @@ -1014,7 +1012,7 @@ Option 1: pass rows explicitly to skiprows .. ipython:: python - pd.read_csv(StringIO(data.decode('UTF-8')), sep=';', skiprows=[11,12], + pd.read_csv(StringIO(data), sep=';', skiprows=[11,12], index_col=0, parse_dates=True, header=10) Option 2: read column names and then data @@ -1022,15 +1020,12 @@ Option 2: read column names and then data .. ipython:: python - pd.read_csv(StringIO(data.decode('UTF-8')), sep=';', - header=10, parse_dates=True, nrows=10).columns - columns = pd.read_csv(StringIO(data.decode('UTF-8')), sep=';', - header=10, parse_dates=True, nrows=10).columns - pd.read_csv(StringIO(data.decode('UTF-8')), sep=';', + pd.read_csv(StringIO(data), sep=';', header=10, nrows=10).columns + columns = pd.read_csv(StringIO(data), sep=';', header=10, nrows=10).columns + pd.read_csv(StringIO(data), sep=';', index_col=0, header=12, parse_dates=True, names=columns) - .. 
_cookbook.sql: SQL diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index 8a1e06fa6d86c..a1c12044adc34 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -249,7 +249,7 @@ normal Python ``list``. Monotonicity of an index can be tested with the ``is_mon .. ipython:: python - df = pd.DataFrame(index=[2,3,3,4,5], columns=['data'], data=range(5)) + df = pd.DataFrame(index=[2,3,3,4,5], columns=['data'], data=list(range(5))) df.index.is_monotonic_increasing # no rows 0 or 1, but still returns rows 2, 3 (both of them), and 4: @@ -263,7 +263,7 @@ On the other hand, if the index is not monotonic, then both slice bounds must be .. ipython:: python - df = pd.DataFrame(index=[2,3,1,4,3,5], columns=['data'], data=range(6)) + df = pd.DataFrame(index=[2,3,1,4,3,5], columns=['data'], data=list(range(6))) df.index.is_monotonic_increasing # OK because 2 and 4 are in the index diff --git a/doc/source/io.rst b/doc/source/io.rst index 9dfe241062952..8ddf4186eba25 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -502,7 +502,7 @@ worth trying. .. ipython:: python :okwarning: - df = pd.DataFrame({'col_1':range(500000) + ['a', 'b'] + range(500000)}) + df = pd.DataFrame({'col_1': list(range(500000)) + ['a', 'b'] + list(range(500000))}) df.to_csv('foo') mixed_df = pd.read_csv('foo') mixed_df['col_1'].apply(type).value_counts() diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 3a2c48834991f..f90c2960fa30c 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -650,10 +650,16 @@ handling of NaN: because of an ordering bug. See also `Here <https://github.com/numpy/numpy/issues/641>`__ -.. ipython:: python +.. 
code-block:: ipython + + In [2]: pd.factorize(x, sort=True) + Out[2]: + (array([ 2, 2, -1, 3, 0, 1]), + Index([3.14, inf, u'A', u'B'], dtype='object')) + + In [3]: np.unique(x, return_inverse=True)[::-1] + Out[3]: (array([3, 3, 0, 4, 1, 2]), array([nan, 3.14, inf, 'A', 'B'], dtype=object)) - pd.factorize(x, sort=True) - np.unique(x, return_inverse=True)[::-1] .. note:: If you just want to handle one column as a categorical variable (like R's factor),
Some changes to our code examples needed to make them all run with python 3. (there are still problems with our accessor extensions to have doc building on py3 fully working)
https://api.github.com/repos/pandas-dev/pandas/pulls/15011
2016-12-29T15:01:28Z
2016-12-29T21:57:16Z
2016-12-29T21:57:16Z
2016-12-29T21:57:16Z
Fix gbq integration tests so that unique datasets/table names are used
diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py index 28820fd71af27..78891cae38bc0 100644 --- a/pandas/io/tests/test_gbq.py +++ b/pandas/io/tests/test_gbq.py @@ -1065,7 +1065,7 @@ def tearDown(self): pass def test_upload_data_as_service_account_with_key_path(self): - destination_table = DESTINATION_TABLE + "11" + destination_table = "{0}.{1}".format(DATASET_ID + "2", TABLE_ID + "1") test_size = 10 df = make_mixed_dataframe_v2(test_size) @@ -1124,10 +1124,7 @@ def tearDown(self): pass def test_upload_data_as_service_account_with_key_contents(self): - raise nose.SkipTest( - "flaky test") - - destination_table = DESTINATION_TABLE + "12" + destination_table = "{0}.{1}".format(DATASET_ID + "3", TABLE_ID + "1") test_size = 10 df = make_mixed_dataframe_v2(test_size)
This PR will resolve an issue where a gbq integration test was failing. The same dataset/table name was used twice in `test_gbq.py` which could result in a schema conflict if the previous table schema is cached and the new schema has not successfully propagated through Google's backend. See https://github.com/pandas-dev/pandas/commit/45910ae646eba417120dd2bd4ada68a018c97b32 All gbq units tests passed on Travis on my fork : https://travis-ci.org/parthea/pandas/builds/187463043
https://api.github.com/repos/pandas-dev/pandas/pulls/15009
2016-12-29T14:28:40Z
2016-12-30T18:45:21Z
2016-12-30T18:45:21Z
2016-12-30T18:46:50Z
TST: fix tests catching unorderable errors for python 3.6
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 3536a52432b8c..0e6773fd83404 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1807,8 +1807,7 @@ def test_order(self): idx = self.create_index() # 9816 deprecated if PY36: - with tm.assertRaisesRegexp(TypeError, "'>' not supported " - "between instances of 'str' and 'int'"): + with tm.assertRaisesRegexp(TypeError, "'>' not supported"): with tm.assert_produces_warning(FutureWarning): idx.order() elif PY3: @@ -1822,8 +1821,7 @@ def test_order(self): def test_argsort(self): idx = self.create_index() if PY36: - with tm.assertRaisesRegexp(TypeError, "'>' not supported " - "between instances of 'str' and 'int'"): + with tm.assertRaisesRegexp(TypeError, "'>' not supported"): result = idx.argsort() elif PY3: with tm.assertRaisesRegexp(TypeError, "unorderable types"): @@ -1836,8 +1834,7 @@ def test_argsort(self): def test_numpy_argsort(self): idx = self.create_index() if PY36: - with tm.assertRaisesRegexp(TypeError, "'>' not supported " - "between instances of 'str' and 'int'"): + with tm.assertRaisesRegexp(TypeError, "'>' not supported"): result = np.argsort(idx) elif PY3: with tm.assertRaisesRegexp(TypeError, "unorderable types"):
See also https://github.com/MacPython/pandas-wheels/pull/8, seems that the order of int and str is switched in some cases (eg https://travis-ci.org/MacPython/pandas-wheels/jobs/187223139). Therefore making the test here more robust. Related to https://github.com/pandas-dev/pandas/pull/14684
https://api.github.com/repos/pandas-dev/pandas/pulls/15007
2016-12-29T13:33:22Z
2016-12-30T18:46:02Z
2016-12-30T18:46:02Z
2016-12-30T18:46:02Z
BUG: Convert uint64 in maybe_convert_numeric
diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py index 6eda93c0a1dc8..3635438a7f76b 100644 --- a/asv_bench/benchmarks/inference.py +++ b/asv_bench/benchmarks/inference.py @@ -95,4 +95,23 @@ def setup(self, dtype, downcast): self.data = self.data_dict[dtype] def time_downcast(self, dtype, downcast): - pd.to_numeric(self.data, downcast=downcast) \ No newline at end of file + pd.to_numeric(self.data, downcast=downcast) + + +class MaybeConvertNumeric(object): + + def setup(self): + n = 1000000 + arr = np.repeat([2**63], n) + arr = arr + np.arange(n).astype('uint64') + arr = np.array([arr[i] if i%2 == 0 else + str(arr[i]) for i in range(n)], + dtype=object) + + arr[-1] = -1 + self.data = arr + self.na_values = set() + + def time_convert(self): + pd.lib.maybe_convert_numeric(self.data, self.na_values, + coerce_numeric=False) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 0873e4b34b0b1..1ff591c86f6fa 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -319,5 +319,5 @@ Bug Fixes - Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`) -- Bug in converting object elements of array-like objects to unsigned 64-bit integers (:issue:`4471`) +- Bug in converting object elements of array-like objects to unsigned 64-bit integers (:issue:`4471`, :issue:`14982`) - Bug in ``pd.pivot_table()`` where no error was raised when values argument was not in the columns (:issue:`14938`) diff --git a/pandas/io/tests/parser/common.py b/pandas/io/tests/parser/common.py index b6d1d4bb09f56..c6c2a9e954f55 100644 --- a/pandas/io/tests/parser/common.py +++ b/pandas/io/tests/parser/common.py @@ -944,26 +944,39 @@ def test_int64_overflow(self): 00013007854817840017963235 00013007854817840018860166""" + # 13007854817840016671868 > UINT64_MAX, so this + # will overflow and return object as the dtype. 
result = self.read_csv(StringIO(data)) self.assertTrue(result['ID'].dtype == object) - self.assertRaises(OverflowError, self.read_csv, - StringIO(data), converters={'ID': np.int64}) + # 13007854817840016671868 > UINT64_MAX, so attempts + # to cast to either int64 or uint64 will result in + # an OverflowError being raised. + for conv in (np.int64, np.uint64): + self.assertRaises(OverflowError, self.read_csv, + StringIO(data), converters={'ID': conv}) - # Just inside int64 range: parse as integer + # These numbers fall right inside the int64 range, + # so they should be parsed as string. i_max = np.iinfo(np.int64).max i_min = np.iinfo(np.int64).min + for x in [i_max, i_min]: result = self.read_csv(StringIO(str(x)), header=None) expected = DataFrame([x]) tm.assert_frame_equal(result, expected) - # Just outside int64 range: parse as string + # These numbers fall just outside the int64 range, + # so they should be parsed as string. too_big = i_max + 1 too_small = i_min - 1 + for x in [too_big, too_small]: result = self.read_csv(StringIO(str(x)), header=None) - expected = DataFrame([str(x)]) + if self.engine == 'python' and x == too_big: + expected = DataFrame([x]) + else: + expected = DataFrame([str(x)]) tm.assert_frame_equal(result, expected) def test_empty_with_nrows_chunksize(self): diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index b6b13def193ff..d1e264b06eccc 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -13,9 +13,6 @@ from util cimport (UINT8_MAX, UINT16_MAX, UINT32_MAX, UINT64_MAX, # core.common import for fast inference checks -npy_int64_max = np.iinfo(np.int64).max - - cpdef bint is_float(object obj): return util.is_float_object(obj) @@ -629,13 +626,88 @@ cdef extern from "parse_helper.h": cdef int64_t iINT64_MAX = <int64_t> INT64_MAX cdef int64_t iINT64_MIN = <int64_t> INT64_MIN +cdef uint64_t iUINT64_MAX = <uint64_t> UINT64_MAX + + +cdef inline bint _check_uint64_nan(bint seen_uint, bint seen_null, + bint 
coerce_numeric) except -1: + """ + Check whether we have encountered uint64 when handling a NaN element. + + If uint64 has been encountered, we cannot safely cast to float64 due + to truncation problems (this would occur if we return a numeric array + containing a NaN element). + + Returns + ------- + return_values : bool + Whether or not we should return the original input array to avoid + data truncation. + """ + if seen_null and seen_uint: + if not coerce_numeric: + return True + else: + raise ValueError("uint64 array detected, and such an " + "array cannot contain NaN.") + + return False -def maybe_convert_numeric(object[:] values, set na_values, +cdef inline bint _check_uint64_int64_conflict(bint seen_sint, bint seen_uint, + bint coerce_numeric) except -1: + """ + Check whether we have encountered both int64 and uint64 elements. + + If both have been encountered, we cannot safely cast to an integer + dtype since none is large enough to hold both types of elements. + + Returns + ------- + return_values : bool + Whether or not we should return the original input array to avoid + data truncation. + """ + if seen_sint and seen_uint: + if not coerce_numeric: + return True + else: + raise ValueError("uint64 and negative values detected. " + "Cannot safely return a numeric array " + "without truncating data.") + + return False + + +def maybe_convert_numeric(ndarray[object] values, set na_values, bint convert_empty=True, bint coerce_numeric=False): """ - Type inference function-- convert strings to numeric (potentially) and - convert to proper dtype array + Convert object array to a numeric array if possible. + + Parameters + ---------- + values : ndarray + Array of object elements to convert. + na_values : set + Set of values that should be interpreted as NaN. + convert_empty : bool, default True + If an empty array-like object is encountered, whether to interpret + that element as NaN or not. 
If set to False, a ValueError will be + raised if such an element is encountered and 'coerce_numeric' is False. + coerce_numeric : bool, default False + If initial attempts to convert to numeric have failed, whether to + force conversion to numeric via alternative methods or by setting the + element to NaN. Otherwise, an Exception will be raised when such an + element is encountered. + + This boolean also has an impact on how conversion behaves when a + numeric array has no suitable numerical dtype to return (i.e. uint64, + int32, uint8). If set to False, the original object array will be + returned. Otherwise, a ValueError will be raised. + + Returns + ------- + numeric_array : array of converted object values to numerical ones """ cdef: int status, maybe_int @@ -643,7 +715,11 @@ def maybe_convert_numeric(object[:] values, set na_values, ndarray[float64_t] floats = np.empty(n, dtype='f8') ndarray[complex128_t] complexes = np.empty(n, dtype='c16') ndarray[int64_t] ints = np.empty(n, dtype='i8') + ndarray[uint64_t] uints = np.empty(n, dtype='u8') ndarray[uint8_t] bools = np.empty(n, dtype='u1') + bint seen_null = False + bint seen_uint = False + bint seen_sint = False bint seen_float = False bint seen_complex = False bint seen_int = False @@ -655,22 +731,60 @@ def maybe_convert_numeric(object[:] values, set na_values, val = values[i] if val.__hash__ is not None and val in na_values: + seen_null = True + if _check_uint64_nan(seen_uint, seen_null, + coerce_numeric): + return values + floats[i] = complexes[i] = nan seen_float = True elif util.is_float_object(val): + if val != val: + seen_null = True + if _check_uint64_nan(seen_uint, seen_null, + coerce_numeric): + return values + floats[i] = complexes[i] = val seen_float = True elif util.is_integer_object(val): - floats[i] = ints[i] = val + floats[i] = complexes[i] = val + as_int = int(val) seen_int = True + + seen_uint = seen_uint or (as_int > iINT64_MAX) + seen_sint = seen_sint or (as_int < 0) + + if 
(_check_uint64_nan(seen_uint, seen_null, coerce_numeric) or + _check_uint64_int64_conflict(seen_sint, seen_uint, + coerce_numeric)): + return values + + if seen_uint: + uints[i] = as_int + elif seen_sint: + ints[i] = as_int + else: + uints[i] = as_int + ints[i] = as_int elif util.is_bool_object(val): - floats[i] = ints[i] = bools[i] = val + floats[i] = uints[i] = ints[i] = bools[i] = val seen_bool = True elif val is None: + seen_null = True + if _check_uint64_nan(seen_uint, seen_null, + coerce_numeric): + return values + floats[i] = complexes[i] = nan seen_float = True elif hasattr(val, '__len__') and len(val) == 0: if convert_empty or coerce_numeric: + seen_null = True + if _check_uint64_nan(seen_uint, seen_null, + coerce_numeric): + return values + floats[i] = complexes[i] = nan seen_float = True else: @@ -686,24 +800,61 @@ def maybe_convert_numeric(object[:] values, set na_values, status = floatify(val, &fval, &maybe_int) if fval in na_values: + seen_null = True + if _check_uint64_nan(seen_uint, seen_null, + coerce_numeric): + return values + floats[i] = complexes[i] = nan seen_float = True else: + if fval != fval: + seen_null = True + if _check_uint64_nan(seen_uint, seen_null, + coerce_numeric): + return values + floats[i] = fval - if not seen_float: - if maybe_int: - as_int = int(val) + if maybe_int: + as_int = int(val) - if as_int <= iINT64_MAX and as_int >= iINT64_MIN: + if as_int in na_values: + seen_float = True + seen_null = True + else: + seen_uint = seen_uint or (as_int > iINT64_MAX) + seen_sint = seen_sint or (as_int < 0) + seen_int = True + + if (_check_uint64_nan(seen_uint, seen_null, + coerce_numeric) or + _check_uint64_int64_conflict(seen_sint, seen_uint, + coerce_numeric)): + return values + + if not (seen_float or as_int in na_values): + if as_int < iINT64_MIN or as_int > iUINT64_MAX: + raise ValueError('Integer out of range.') + + if seen_uint: + uints[i] = as_int + elif seen_sint: ints[i] = as_int else: - raise ValueError('integer out of 
range') - else: - seen_float = True + uints[i] = as_int + ints[i] = as_int + else: + seen_float = True except (TypeError, ValueError) as e: if not coerce_numeric: raise type(e)(str(e) + ' at position {}'.format(i)) + elif "uint64" in str(e): # Exception from check functions. + raise + seen_null = True + if _check_uint64_nan(seen_uint, seen_null, + coerce_numeric): + return values floats[i] = nan seen_float = True @@ -713,9 +864,14 @@ def maybe_convert_numeric(object[:] values, set na_values, elif seen_float: return floats elif seen_int: - return ints + if seen_uint: + return uints + else: + return ints elif seen_bool: return bools.view(np.bool_) + elif seen_uint: + return uints return ints @@ -810,7 +966,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, floats[i] = <float64_t> val complexes[i] = <double complex> val if not seen_null: - seen_uint = seen_uint or (int(val) > npy_int64_max) + seen_uint = seen_uint or (int(val) > iINT64_MAX) seen_sint = seen_sint or (val < 0) if seen_uint and seen_sint: diff --git a/pandas/tests/types/test_inference.py b/pandas/tests/types/test_inference.py index e79475ff91dca..796c77354d6f8 100644 --- a/pandas/tests/types/test_inference.py +++ b/pandas/tests/types/test_inference.py @@ -255,6 +255,59 @@ def test_convert_non_hashable(self): result = lib.maybe_convert_numeric(arr, set(), False, True) tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan])) + def test_convert_numeric_uint64(self): + arr = np.array([2**63], dtype=object) + exp = np.array([2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp) + + arr = np.array([str(2**63)], dtype=object) + exp = np.array([2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp) + + arr = np.array([np.uint64(2**63)], dtype=object) + exp = np.array([2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp) + + def 
test_convert_numeric_uint64_nan(self): + msg = 'uint64 array detected' + cases = [(np.array([2**63, np.nan], dtype=object), set()), + (np.array([str(2**63), np.nan], dtype=object), set()), + (np.array([np.nan, 2**63], dtype=object), set()), + (np.array([np.nan, str(2**63)], dtype=object), set()), + (np.array([2**63, 2**63 + 1], dtype=object), set([2**63])), + (np.array([str(2**63), str(2**63 + 1)], + dtype=object), set([2**63]))] + + for coerce in (True, False): + for arr, na_values in cases: + if coerce: + with tm.assertRaisesRegexp(ValueError, msg): + lib.maybe_convert_numeric(arr, na_values, + coerce_numeric=coerce) + else: + tm.assert_numpy_array_equal(lib.maybe_convert_numeric( + arr, na_values), arr) + + def test_convert_numeric_int64_uint64(self): + msg = 'uint64 and negative values detected' + cases = [np.array([2**63, -1], dtype=object), + np.array([str(2**63), -1], dtype=object), + np.array([str(2**63), str(-1)], dtype=object), + np.array([-1, 2**63], dtype=object), + np.array([-1, str(2**63)], dtype=object), + np.array([str(-1), str(2**63)], dtype=object)] + + for coerce in (True, False): + for case in cases: + if coerce: + with tm.assertRaisesRegexp(ValueError, msg): + print(case) + lib.maybe_convert_numeric(case, set(), + coerce_numeric=coerce) + else: + tm.assert_numpy_array_equal(lib.maybe_convert_numeric( + case, set()), case) + def test_maybe_convert_objects_uint64(self): # see gh-4471 arr = np.array([2**63], dtype=object)
Add handling for `uint64` elements in an array with the follow behavior specifications: 1) If `uint64` and `NaN` are both detected, the original input will be returned if `coerce_numeric` is `False`. Otherwise, an `Exception` is raised. 2) If `uint64` and negative numbers are both detected, the original input be returned if `coerce_numeric` is `False`. Otherwise, an `Exception` is raised. Closes #14982. Partial fix for #14983.
https://api.github.com/repos/pandas-dev/pandas/pulls/15005
2016-12-28T19:54:53Z
2016-12-30T18:53:09Z
null
2016-12-30T18:54:26Z
DOC: Fix df.resample docstring example
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 77c2699f5a432..8ce4c4b00454b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4229,11 +4229,11 @@ def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] #select first 5 rows - 2000-01-01 00:00:00 0 + 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN - 2000-01-01 00:01:00 1 + 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN - 2000-01-01 00:02:00 2 + 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN``
- [x] passes ``git diff upstream/master | flake8 --diff``
https://api.github.com/repos/pandas-dev/pandas/pulls/15004
2016-12-28T16:56:10Z
2016-12-28T23:43:47Z
2016-12-28T23:43:47Z
2016-12-28T23:43:59Z
ERR: qcut uniquess checking (try 2)
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 0873e4b34b0b1..64c5de6cb100a 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -105,6 +105,7 @@ Other enhancements of sorting or an incorrect key. See :ref:`here <advanced.unsorted>` - ``pd.cut`` and ``pd.qcut`` now support datetime64 and timedelta64 dtypes (:issue:`14714`, :issue:`14798`) +- ``pd.qcut`` has gained the ``duplicates='raise'|'drop'`` option to control whether to raise on duplicated edges (:issue:`7751`) - ``Series`` provides a ``to_excel`` method to output Excel files (:issue:`8825`) - The ``usecols`` argument in ``pd.read_csv`` now accepts a callable function as a value (:issue:`14154`) - ``pd.DataFrame.plot`` now prints a title above each subplot if ``suplots=True`` and ``title`` is a list of strings (:issue:`14753`) diff --git a/pandas/tools/tests/test_tile.py b/pandas/tools/tests/test_tile.py index c9a96d80f35ba..8b180957801f9 100644 --- a/pandas/tools/tests/test_tile.py +++ b/pandas/tools/tests/test_tile.py @@ -272,6 +272,18 @@ def test_series_retbins(self): np.array([0, 0, 1, 1], dtype=np.int8)) tm.assert_numpy_array_equal(bins, np.array([0, 1.5, 3])) + def test_qcut_duplicates_drop(self): + # GH 7751 + values = [0, 0, 0, 0, 1, 2, 3] + cats = qcut(values, 3, duplicates='drop') + ex_levels = ['[0, 1]', '(1, 3]'] + self.assertTrue((cats.categories == ex_levels).all()) + + def test_qcut_duplicates_raise(self): + # GH 7751 + values = [0, 0, 0, 0, 1, 2, 3] + self.assertRaises(ValueError, qcut, values, 3, duplicates='raise') + def test_single_bin(self): # issue 14652 expected = Series([0, 0]) diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py index a372e113f1d7e..2875d9c14dc47 100644 --- a/pandas/tools/tile.py +++ b/pandas/tools/tile.py @@ -129,7 +129,7 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, series_index, name) -def qcut(x, q, labels=None, retbins=False, precision=3): +def qcut(x, q, 
labels=None, retbins=False, precision=3, duplicates='raise'): """ Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example @@ -151,6 +151,10 @@ def qcut(x, q, labels=None, retbins=False, precision=3): as a scalar. precision : int The precision at which to store and display the bins labels + duplicates : {default 'raise', 'drop'}, optional + If bin edges are not unique, raise ValueError or drop non-uniques. + + .. versionadded:: 0.20.0 Returns ------- @@ -187,7 +191,7 @@ def qcut(x, q, labels=None, retbins=False, precision=3): bins = algos.quantile(x, quantiles) fac, bins = _bins_to_cuts(x, bins, labels=labels, precision=precision, include_lowest=True, - dtype=dtype) + dtype=dtype, duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name) @@ -195,14 +199,24 @@ def qcut(x, q, labels=None, retbins=False, precision=3): def _bins_to_cuts(x, bins, right=True, labels=None, precision=3, include_lowest=False, - dtype=None): + dtype=None, duplicates='raise'): + + if duplicates not in ['raise', 'drop']: + raise ValueError("invalid value for 'duplicates' parameter, " + "valid options are: raise, drop") + + unique_bins = algos.unique(bins) + if len(unique_bins) < len(bins): + if duplicates == 'raise': + raise ValueError("Bin edges must be unique: {}. You " + "can drop duplicate edges by setting " + "'duplicates' param".format(repr(bins))) + else: + bins = unique_bins side = 'left' if right else 'right' ids = bins.searchsorted(x, side=side) - if len(algos.unique(bins)) < len(bins): - raise ValueError('Bin edges must be unique: %s' % repr(bins)) - if include_lowest: ids[x == bins[0]] = 1
- [x] closes #7751 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry Add option to drop non-unique bins.
https://api.github.com/repos/pandas-dev/pandas/pulls/15000
2016-12-27T22:56:15Z
2016-12-30T19:02:01Z
null
2016-12-30T19:02:08Z
BUG: Fixed to_html with index=False and max_rows
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 0873e4b34b0b1..5e27c6e9cc293 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -313,6 +313,12 @@ Bug Fixes +- Bug in ``DataFrame.to_html`` with ``index=False`` and ``max_rows`` raising in ``IndexError`` (:issue:`14998`) + + + + + diff --git a/pandas/formats/format.py b/pandas/formats/format.py index 0cf6050e515e0..a3319437474c2 100644 --- a/pandas/formats/format.py +++ b/pandas/formats/format.py @@ -1182,7 +1182,7 @@ def _write_body(self, indent): else: self._write_regular_rows(fmt_values, indent) else: - for i in range(len(self.frame)): + for i in range(min(len(self.frame), self.max_rows)): row = [fmt_values[j][i] for j in range(len(self.columns))] self.write_tr(row, indent, self.indent_delta, tags=None) diff --git a/pandas/tests/formats/test_format.py b/pandas/tests/formats/test_format.py index e7c32a4baa4ea..00e5e002ca48d 100644 --- a/pandas/tests/formats/test_format.py +++ b/pandas/tests/formats/test_format.py @@ -2771,6 +2771,25 @@ def test_to_html_with_classes(self): result = df.to_html(classes=["sortable", "draggable"]) self.assertEqual(result, expected) + def test_to_html_no_index_max_rows(self): + # GH https://github.com/pandas-dev/pandas/issues/14998 + df = DataFrame({"A": [1, 2, 3, 4]}) + result = df.to_html(index=False, max_rows=1) + expected = dedent("""\ + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th>A</th> + </tr> + </thead> + <tbody> + <tr> + <td>1</td> + </tr> + </tbody> + </table>""") + self.assertEqual(result, expected) + def test_pprint_pathological_object(self): """ if the test fails, the stack will overflow and nose crash,
closes https://github.com/pandas-dev/pandas/issues/14998 Previously raised an IndexError by assuming that `len(fmt_values)` was always the same length as `len(self.data)`.
https://api.github.com/repos/pandas-dev/pandas/pulls/14999
2016-12-27T21:54:58Z
2016-12-30T19:43:12Z
null
2016-12-31T10:06:20Z
DOC: consistent import timedeltas docs
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index 7d0fa61b1bdac..f7aa879fa7216 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -4,18 +4,17 @@ .. ipython:: python :suppress: - from datetime import datetime, timedelta + import datetime import numpy as np + import pandas as pd np.random.seed(123456) - from pandas import * randn = np.random.randn randint = np.random.randint np.set_printoptions(precision=4, suppress=True) - options.display.max_rows=15 + pd.options.display.max_rows=15 import dateutil import pytz from dateutil.relativedelta import relativedelta - from pandas.tseries.api import * from pandas.tseries.offsets import * .. _timedeltas.timedeltas: @@ -40,41 +39,41 @@ You can construct a ``Timedelta`` scalar through various arguments: .. ipython:: python # strings - Timedelta('1 days') - Timedelta('1 days 00:00:00') - Timedelta('1 days 2 hours') - Timedelta('-1 days 2 min 3us') + pd.Timedelta('1 days') + pd.Timedelta('1 days 00:00:00') + pd.Timedelta('1 days 2 hours') + pd.Timedelta('-1 days 2 min 3us') # like datetime.timedelta # note: these MUST be specified as keyword arguments - Timedelta(days=1, seconds=1) + pd.Timedelta(days=1, seconds=1) # integers with a unit - Timedelta(1, unit='d') + pd.Timedelta(1, unit='d') - # from a timedelta/np.timedelta64 - Timedelta(timedelta(days=1, seconds=1)) - Timedelta(np.timedelta64(1, 'ms')) + # from a datetime.timedelta/np.timedelta64 + pd.Timedelta(datetime.timedelta(days=1, seconds=1)) + pd.Timedelta(np.timedelta64(1, 'ms')) # negative Timedeltas have this string repr # to be more consistent with datetime.timedelta conventions - Timedelta('-1us') + pd.Timedelta('-1us') # a NaT - Timedelta('nan') - Timedelta('nat') + pd.Timedelta('nan') + pd.Timedelta('nat') :ref:`DateOffsets<timeseries.offsets>` (``Day, Hour, Minute, Second, Milli, Micro, Nano``) can also be used in construction. .. 
ipython:: python - Timedelta(Second(2)) + pd.Timedelta(Second(2)) Further, operations among the scalars yield another scalar ``Timedelta``. .. ipython:: python - Timedelta(Day(2)) + Timedelta(Second(2)) + Timedelta('00:00:00.000123') + pd.Timedelta(Day(2)) + pd.Timedelta(Second(2)) + pd.Timedelta('00:00:00.000123') to_timedelta ~~~~~~~~~~~~ @@ -93,21 +92,21 @@ You can parse a single string to a Timedelta: .. ipython:: python - to_timedelta('1 days 06:05:01.00003') - to_timedelta('15.5us') + pd.to_timedelta('1 days 06:05:01.00003') + pd.to_timedelta('15.5us') or a list/array of strings: .. ipython:: python - to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) + pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) The ``unit`` keyword argument specifies the unit of the Timedelta: .. ipython:: python - to_timedelta(np.arange(5), unit='s') - to_timedelta(np.arange(5), unit='d') + pd.to_timedelta(np.arange(5), unit='s') + pd.to_timedelta(np.arange(5), unit='d') .. _timedeltas.limitations: @@ -133,17 +132,17 @@ subtraction operations on ``datetime64[ns]`` Series, or ``Timestamps``. .. ipython:: python - s = Series(date_range('2012-1-1', periods=3, freq='D')) - td = Series([ Timedelta(days=i) for i in range(3) ]) - df = DataFrame(dict(A = s, B = td)) + s = pd.Series(pd.date_range('2012-1-1', periods=3, freq='D')) + td = pd.Series([ pd.Timedelta(days=i) for i in range(3) ]) + df = pd.DataFrame(dict(A = s, B = td)) df df['C'] = df['A'] + df['B'] df df.dtypes s - s.max() - s - datetime(2011, 1, 1, 3, 5) - s + timedelta(minutes=5) + s - datetime.datetime(2011, 1, 1, 3, 5) + s + datetime.timedelta(minutes=5) s + Minute(5) s + Minute(5) + Milli(5) @@ -173,17 +172,17 @@ Operands can also appear in a reversed order (a singular object operated with a .. 
ipython:: python s.max() - s - datetime(2011, 1, 1, 3, 5) - s - timedelta(minutes=5) + s + datetime.datetime(2011, 1, 1, 3, 5) - s + datetime.timedelta(minutes=5) + s ``min, max`` and the corresponding ``idxmin, idxmax`` operations are supported on frames: .. ipython:: python - A = s - Timestamp('20120101') - Timedelta('00:05:05') - B = s - Series(date_range('2012-1-2', periods=3, freq='D')) + A = s - pd.Timestamp('20120101') - pd.Timedelta('00:05:05') + B = s - pd.Series(pd.date_range('2012-1-2', periods=3, freq='D')) - df = DataFrame(dict(A=A, B=B)) + df = pd.DataFrame(dict(A=A, B=B)) df df.min() @@ -209,13 +208,13 @@ pass a timedelta to get a particular value. y.fillna(0) y.fillna(10) - y.fillna(Timedelta('-1 days, 00:00:05')) + y.fillna(pd.Timedelta('-1 days, 00:00:05')) You can also negate, multiply and use ``abs`` on ``Timedeltas``: .. ipython:: python - td1 = Timedelta('-1 days 2 hours 3 seconds') + td1 = pd.Timedelta('-1 days 2 hours 3 seconds') td1 -1 * td1 - td1 @@ -231,7 +230,7 @@ Numeric reduction operation for ``timedelta64[ns]`` will return ``Timedelta`` ob .. ipython:: python - y2 = Series(to_timedelta(['-1 days +00:00:05', 'nat', '-1 days +00:00:05', '1 days'])) + y2 = pd.Series(pd.to_timedelta(['-1 days +00:00:05', 'nat', '-1 days +00:00:05', '1 days'])) y2 y2.mean() y2.median() @@ -251,9 +250,9 @@ Note that division by the numpy scalar is true division, while astyping is equiv .. ipython:: python - td = Series(date_range('20130101', periods=4)) - \ - Series(date_range('20121201', periods=4)) - td[2] += timedelta(minutes=5, seconds=3) + td = pd.Series(pd.date_range('20130101', periods=4)) - \ + pd.Series(pd.date_range('20121201', periods=4)) + td[2] += datetime.timedelta(minutes=5, seconds=3) td[3] = np.nan td @@ -274,7 +273,7 @@ yields another ``timedelta64[ns]`` dtypes Series. .. 
ipython:: python td * -1 - td * Series([1, 2, 3, 4]) + td * pd.Series([1, 2, 3, 4]) Attributes ---------- @@ -298,7 +297,7 @@ You can access the value of the fields for a scalar ``Timedelta`` directly. .. ipython:: python - tds = Timedelta('31 days 5 min 3 sec') + tds = pd.Timedelta('31 days 5 min 3 sec') tds.days tds.seconds (-tds).seconds @@ -326,15 +325,15 @@ or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent miss .. ipython:: python - TimedeltaIndex(['1 days', '1 days, 00:00:05', - np.timedelta64(2,'D'), timedelta(days=2,seconds=2)]) + pd.TimedeltaIndex(['1 days', '1 days, 00:00:05', + np.timedelta64(2,'D'), datetime.timedelta(days=2,seconds=2)]) Similarly to ``date_range``, you can construct regular ranges of a ``TimedeltaIndex``: .. ipython:: python - timedelta_range(start='1 days', periods=5, freq='D') - timedelta_range(start='1 days', end='2 days', freq='30T') + pd.timedelta_range(start='1 days', periods=5, freq='D') + pd.timedelta_range(start='1 days', end='2 days', freq='30T') Using the TimedeltaIndex ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -344,8 +343,8 @@ Similarly to other of the datetime-like indices, ``DatetimeIndex`` and ``PeriodI .. ipython:: python - s = Series(np.arange(100), - index=timedelta_range('1 days', periods=100, freq='h')) + s = pd.Series(np.arange(100), + index=pd.timedelta_range('1 days', periods=100, freq='h')) s Selections work similarly, with coercion on string-likes and slices: @@ -354,7 +353,7 @@ Selections work similarly, with coercion on string-likes and slices: s['1 day':'2 day'] s['1 day 01:00:00'] - s[Timedelta('1 day 1h')] + s[pd.Timedelta('1 day 1h')] Furthermore you can use partial string selection and the range will be inferred: @@ -369,9 +368,9 @@ Finally, the combination of ``TimedeltaIndex`` with ``DatetimeIndex`` allow cert .. 
ipython:: python - tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days']) + tdi = pd.TimedeltaIndex(['1 days', pd.NaT, '2 days']) tdi.tolist() - dti = date_range('20130101', periods=3) + dti = pd.date_range('20130101', periods=3) dti.tolist() (dti + tdi).tolist() (dti - tdi).tolist() @@ -391,14 +390,14 @@ Scalars type ops work as well. These can potentially return a *different* type o .. ipython:: python # adding or timedelta and date -> datelike - tdi + Timestamp('20130101') + tdi + pd.Timestamp('20130101') # subtraction of a date and a timedelta -> datelike # note that trying to subtract a date from a Timedelta will raise an exception - (Timestamp('20130101') - tdi).tolist() + (pd.Timestamp('20130101') - tdi).tolist() # timedelta + timedelta -> timedelta - tdi + Timedelta('10 days') + tdi + pd.Timedelta('10 days') # division can result in a Timedelta if the divisor is an integer tdi / 2
Added consistent import for timedeltas docs, i.e.: `import pandas as pd`, `import datetime`. See: https://github.com/pandas-dev/pandas/issues/9886 Didn't touch the `pandas.tseries.offsets` since no consensus has been made how to import those.
https://api.github.com/repos/pandas-dev/pandas/pulls/14997
2016-12-27T15:17:38Z
2016-12-27T16:24:52Z
2016-12-27T16:24:52Z
2016-12-27T16:25:23Z
BUG: fixed index power operation
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 0873e4b34b0b1..8bd794983375d 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -282,6 +282,7 @@ Performance Improvements Bug Fixes ~~~~~~~~~ +- Bug in ``Index`` power operations with reversed operands(:issue:`14973`) - Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`) - Bug in ``DataFrame`` construction in which unsigned 64-bit integer elements were being converted to objects (:issue:`14881`) - Bug in ``astype()`` where ``inf`` values were incorrectly converted to integers. Now raises error now with ``astype()`` for Series and DataFrames (:issue:`14265`) diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py index 1cc546629589d..1fce5ed17c8cf 100644 --- a/pandas/indexes/base.py +++ b/pandas/indexes/base.py @@ -3534,7 +3534,9 @@ def _evaluate_numeric_binop(self, other): operator.sub, '__sub__', reversed=True) cls.__mul__ = cls.__rmul__ = _make_evaluate_binop( operator.mul, '__mul__') - cls.__pow__ = cls.__rpow__ = _make_evaluate_binop( + cls.__rpow__ = _make_evaluate_binop( + operator.pow, '__pow__', reversed=True) + cls.__pow__ = _make_evaluate_binop( operator.pow, '__pow__') cls.__mod__ = _make_evaluate_binop( operator.mod, '__mod__') diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index b362c9716b672..f7f072d5b5d2a 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -105,6 +105,15 @@ def test_numeric_compat(self): for r, e in zip(result, expected): tm.assert_index_equal(r, e) + # test power calculations both ways, GH 14973 + expected = pd.Float64Index(2.0**idx.values) + result = 2.0**idx + tm.assert_index_equal(result, expected) + + expected = pd.Float64Index(idx.values**2.0) + result = idx**2.0 + tm.assert_index_equal(result, expected) + def test_explicit_conversions(self): # GH 8608
* The power operation on a range of indexes was fixed. See issue #14973 - [ ] closes #14973 - [ ] tests added / passed - [ ] passes ``git diff upstream/master | flake8 --diff`` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14996
2016-12-27T05:37:58Z
2016-12-30T19:14:33Z
null
2016-12-30T19:14:38Z
DOC: Clarified and expanded describe documentation
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 77c2699f5a432..bdb369810e5b3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5201,60 +5201,222 @@ def abs(self): """ return np.abs(self) - _shared_docs['describe'] = """ - Generate various summary statistics, excluding NaN values. + def describe(self, percentiles=None, include=None, exclude=None): + """ + Generates descriptive statistics that summarize the central tendency, + dispersion and shape of a dataset's distribution, excluding + ``NaN`` values. + + Analyzes both numeric and object series, as well + as ``DataFrame`` column sets of mixed data types. The output + will vary depending on what is provided. Refer to the notes + below for more detail. Parameters ---------- - percentiles : array-like, optional - The percentiles to include in the output. Should all - be in the interval [0, 1]. By default `percentiles` is - [.25, .5, .75], returning the 25th, 50th, and 75th percentiles. - include, exclude : list-like, 'all', or None (default) - Specify the form of the returned result. Either: - - - None to both (default). The result will include only - numeric-typed columns or, if none are, only categorical columns. - - A list of dtypes or strings to be included/excluded. - To select all numeric types use numpy numpy.number. To select - categorical objects use type object. See also the select_dtypes - documentation. eg. df.describe(include=['O']) - - If include is the string 'all', the output column-set will - match the input one. + percentiles : list-like of numbers, optional + The percentiles to include in the output. All should + fall between 0 and 1. The default is + ``[.25, .5, .75]``, which returns the 25th, 50th, and + 75th percentiles. + include : 'all', list-like of dtypes or None (default), optional + A white list of data types to include in the result. Ignored + for ``Series``. Here are the options: + + - 'all' : All columns of the input will be included in the output. 
+ - A list-like of dtypes : Limits the results to the + provided data types. + To limit the result to numeric types submit + ``numpy.number``. To limit it instead to categorical + objects submit the ``numpy.object`` data type. Strings + can also be used in the style of + ``select_dtypes`` (e.g. ``df.describe(include=['O'])``) + - None (default) : The result will include all numeric columns. + exclude : list-like of dtypes or None (default), optional, + A black list of data types to omit from the result. Ignored + for ``Series``. Here are the options: + + - A list-like of dtypes : Excludes the provided data types + from the result. To select numeric types submit + ``numpy.number``. To select categorical objects submit the data + type ``numpy.object``. Strings can also be used in the style of + ``select_dtypes`` (e.g. ``df.describe(include=['O'])``) + - None (default) : The result will exclude nothing. Returns ------- - summary: %(klass)s of summary statistics + summary: Series/DataFrame of summary statistics Notes ----- - The output DataFrame index depends on the requested dtypes: - - For numeric dtypes, it will include: count, mean, std, min, - max, and lower, 50, and upper percentiles. + For numeric data, the result's index will include ``count``, + ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and + upper percentiles. By default the lower percentile is ``25`` and the + upper percentile is ``75``. The ``50`` percentile is the + same as the median. + + For object data (e.g. strings or timestamps), the result's index + will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` + is the most common value. The ``freq`` is the most common value's + frequency. Timestamps also include the ``first`` and ``last`` items. + + If multiple object values have the highest count, then the + ``count`` and ``top`` results will be arbitrarily chosen from + among those with the highest count. - For object dtypes (e.g. 
timestamps or strings), the index - will include the count, unique, most common, and frequency of the - most common. Timestamps also include the first and last items. + For mixed data types provided via a ``DataFrame``, the default is to + return only an analysis of numeric columns. If ``include='all'`` + is provided as an option, the result will include a union of + attributes of each type. - For mixed dtypes, the index will be the union of the corresponding - output types. Non-applicable entries will be filled with NaN. - Note that mixed-dtype outputs can only be returned from mixed-dtype - inputs and appropriate use of the include/exclude arguments. + The `include` and `exclude` parameters can be used to limit + which columns in a ``DataFrame`` are analyzed for the output. + The parameters are ignored when analyzing a ``Series``. - If multiple values have the highest count, then the - `count` and `most common` pair will be arbitrarily chosen from - among those with the highest count. + Examples + -------- + Describing a numeric ``Series``. - The include, exclude arguments are ignored for Series. + >>> import pandas as pd + >>> s = pd.Series([1, 2, 3]) + >>> s.describe() + count 3.0 + mean 2.0 + std 1.0 + min 1.0 + 25% 1.5 + 50% 2.0 + 75% 2.5 + max 3.0 + + Describing a categorical ``Series``. + + >>> s = pd.Series(['a', 'a', 'b', 'c']) + >>> s.describe() + count 4 + unique 3 + top a + freq 2 + dtype: object + + Describing a timestamp ``Series``. + + >>> import numpy as np + >>> s = pd.Series([ + ... np.datetime64("2000-01-01"), + ... np.datetime64("2010-01-01"), + ... np.datetime64("2010-01-01") + ... ]) + >>> s.describe() + count 3 + unique 2 + top 2010-01-01 00:00:00 + freq 2 + first 2000-01-01 00:00:00 + last 2010-01-01 00:00:00 + dtype: object + + Describing a ``DataFrame``. By default only numeric fields + are returned. + + >>> df = pd.DataFrame( + ... [[1, 'a'], [2, 'b'], [3, 'c']], + ... columns=['numeric', 'object'] + ... 
) + >>> df.describe() + numeric + count 3.0 + mean 2.0 + std 1.0 + min 1.0 + 25% 1.5 + 50% 2.0 + 75% 2.5 + max 3.0 + + Describing all columns of a ``DataFrame`` regardless of data type. + + >>> df.describe(include='all') + numeric object + count 3.0 3 + unique NaN 3 + top NaN b + freq NaN 1 + mean 2.0 NaN + std 1.0 NaN + min 1.0 NaN + 25% 1.5 NaN + 50% 2.0 NaN + 75% 2.5 NaN + max 3.0 NaN + + Describing a column from a ``DataFrame`` by accessing it as + an attribute. + + >>> df.numeric.describe() + count 3.0 + mean 2.0 + std 1.0 + min 1.0 + 25% 1.5 + 50% 2.0 + 75% 2.5 + max 3.0 + Name: numeric, dtype: float64 + + Including only numeric columns in a ``DataFrame`` description. + + >>> df.describe(include=[np.number]) + numeric + count 3.0 + mean 2.0 + std 1.0 + min 1.0 + 25% 1.5 + 50% 2.0 + 75% 2.5 + max 3.0 + + Including only string columns in a ``DataFrame`` description. + + >>> df.describe(include=[np.object]) + object + count 3 + unique 3 + top b + freq 1 + + Excluding numeric columns from a ``DataFrame`` description. + + >>> df.describe(exclude=[np.number]) + object + count 3 + unique 3 + top b + freq 1 + + Excluding object columns from a ``DataFrame`` description. + + >>> df.describe(exclude=[np.object]) + numeric + count 3.0 + mean 2.0 + std 1.0 + min 1.0 + 25% 1.5 + 50% 2.0 + 75% 2.5 + max 3.0 See Also -------- + DataFrame.count + DataFrame.max + DataFrame.min + DataFrame.mean + DataFrame.std DataFrame.select_dtypes """ - - @Appender(_shared_docs['describe'] % _shared_doc_kwargs) - def describe(self, percentiles=None, include=None, exclude=None): if self.ndim >= 3: msg = "describe is not implemented on Panel or PanelND objects." raise NotImplementedError(msg)
- [x] passes ``git diff upstream/master | flake8 --diff`` As discussed in #14483.
https://api.github.com/repos/pandas-dev/pandas/pulls/14995
2016-12-27T04:49:17Z
2017-01-02T09:05:47Z
2017-01-02T09:05:47Z
2017-01-02T09:05:55Z
Bug: Raise ValueError with interpolate & fillna limit = 0 (#9217)
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 6fe066b08e255..bb0d6003c28b7 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -418,6 +418,7 @@ Other API Changes - ``SparseArray.cumsum()`` and ``SparseSeries.cumsum()`` will now always return ``SparseArray`` and ``SparseSeries`` respectively (:issue:`12855`) - ``DataFrame.applymap()`` with an empty ``DataFrame`` will return a copy of the empty ``DataFrame`` instead of a ``Series`` (:issue:`8222`) - ``.loc`` has compat with ``.ix`` for accepting iterators, and NamedTuples (:issue:`15120`) +- ``interpolate()`` and ``fillna()`` will raise a ``ValueError`` if the ``limit`` keyword argument is not greater than 0. (:issue:`9217`) - ``pd.read_csv()`` will now issue a ``ParserWarning`` whenever there are conflicting values provided by the ``dialect`` parameter and the user (:issue:`14898`) - ``pd.read_csv()`` will now raise a ``ValueError`` for the C engine if the quote character is larger than than one byte (:issue:`11592`) - ``inplace`` arguments now require a boolean value, else a ``ValueError`` is thrown (:issue:`14189`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 228dd2acd2124..20e6e027dbf09 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3262,7 +3262,7 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be - filled. + filled. Must be greater than 0 if not None. 
downcast : dict, default is None a dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate @@ -3281,6 +3281,7 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None): inplace = validate_bool_kwarg(inplace, 'inplace') + if isinstance(value, (list, tuple)): raise TypeError('"value" parameter must be a scalar or dict, but ' 'you passed a "{0}"'.format(type(value).__name__)) @@ -3292,7 +3293,6 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, axis = 0 axis = self._get_axis_number(axis) method = missing.clean_fill_method(method) - from pandas import DataFrame if value is None: if method is None: @@ -3687,7 +3687,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, * 0: fill column-by-column * 1: fill row-by-row limit : int, default None. - Maximum number of consecutive NaNs to fill. + Maximum number of consecutive NaNs to fill. Must be greater than 0. limit_direction : {'forward', 'backward', 'both'}, default 'forward' If limit is specified, consecutive NaNs will be filled in this direction. 
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index f0b1516d786c6..6cd5eceed5f2a 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -372,6 +372,10 @@ def fillna(self, value, limit=None, inplace=False, downcast=None, original_value = value mask = isnull(self.values) if limit is not None: + if not is_integer(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') if self.ndim > 2: raise NotImplementedError("number of dimensions for 'fillna' " "is currently limited to 2") diff --git a/pandas/core/missing.py b/pandas/core/missing.py index e83a0518d97f6..ffd0423572f5e 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -12,7 +12,7 @@ is_float_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_integer_dtype, _ensure_float64, is_scalar, - needs_i8_conversion) + needs_i8_conversion, is_integer) from pandas.types.missing import isnull @@ -169,7 +169,11 @@ def _interp_limit(invalid, fw_limit, bw_limit): # the beginning (see issues #9218 and #10420) violate_limit = sorted(start_nans) - if limit: + if limit is not None: + if not is_integer(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') if limit_direction == 'forward': violate_limit = sorted(start_nans | set(_interp_limit(invalid, limit, 0))) diff --git a/pandas/src/algos_common_helper.pxi.in b/pandas/src/algos_common_helper.pxi.in index 5e87528943005..42089f9520ab6 100644 --- a/pandas/src/algos_common_helper.pxi.in +++ b/pandas/src/algos_common_helper.pxi.in @@ -83,8 +83,10 @@ def pad_{{name}}(ndarray[{{c_type}}] old, ndarray[{{c_type}}] new, if limit is None: lim = nright else: - if limit < 0: - raise ValueError('Limit must be non-negative') + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') lim = limit if nleft == 0 or nright 
== 0 or new[nright - 1] < old[0]: @@ -146,8 +148,10 @@ def pad_inplace_{{name}}(ndarray[{{c_type}}] values, if limit is None: lim = N else: - if limit < 0: - raise ValueError('Limit must be non-negative') + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') lim = limit val = values[0] @@ -180,8 +184,10 @@ def pad_2d_inplace_{{name}}(ndarray[{{c_type}}, ndim=2] values, if limit is None: lim = N else: - if limit < 0: - raise ValueError('Limit must be non-negative') + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') lim = limit for j in range(K): @@ -240,8 +246,10 @@ def backfill_{{name}}(ndarray[{{c_type}}] old, ndarray[{{c_type}}] new, if limit is None: lim = nright else: - if limit < 0: - raise ValueError('Limit must be non-negative') + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') lim = limit if nleft == 0 or nright == 0 or new[0] > old[nleft - 1]: @@ -304,8 +312,10 @@ def backfill_inplace_{{name}}(ndarray[{{c_type}}] values, if limit is None: lim = N else: - if limit < 0: - raise ValueError('Limit must be non-negative') + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') lim = limit val = values[N - 1] @@ -338,8 +348,10 @@ def backfill_2d_inplace_{{name}}(ndarray[{{c_type}}, ndim=2] values, if limit is None: lim = N else: - if limit < 0: - raise ValueError('Limit must be non-negative') + if not util.is_integer_object(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') lim = limit for j in range(K): diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 
702fa2acb5106..aae14905d375b 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -295,6 +295,13 @@ def test_fillna_raise(self): self.assertRaises(TypeError, s.fillna, [1, 2]) self.assertRaises(TypeError, s.fillna, (1, 2)) + # related GH 9217, make sure limit is an int and greater than 0 + s = Series([1, 2, 3, None]) + for limit in [-1, 0, 1., 2.]: + for method in ['backfill', 'bfill', 'pad', 'ffill', None]: + with tm.assertRaises(ValueError): + s.fillna(1, limit=limit, method=method) + def test_fillna_nat(self): series = Series([0, 1, 2, tslib.iNaT], dtype='M8[ns]') @@ -865,6 +872,17 @@ def test_interp_limit(self): result = s.interpolate(method='linear', limit=2) assert_series_equal(result, expected) + # GH 9217, make sure limit is an int and greater than 0 + methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero', + 'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh', + 'polynomial', 'spline', 'piecewise_polynomial', None, + 'from_derivatives', 'pchip', 'akima'] + s = pd.Series([1, 2, np.nan, np.nan, 5]) + for limit in [-1, 0, 1., 2.]: + for method in methods: + with tm.assertRaises(ValueError): + s.interpolate(limit=limit, method=method) + def test_interp_limit_forward(self): s = Series([1, 3, np.nan, np.nan, np.nan, 11])
- [x] closes #9217 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry Add similar logic to `fillna`
https://api.github.com/repos/pandas-dev/pandas/pulls/14994
2016-12-26T22:50:36Z
2017-02-14T13:34:23Z
null
2017-12-20T02:04:10Z
BUG: Reindex with columns and method
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 40bd8bc4154a6..21091d87dbfd7 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -285,6 +285,7 @@ Bug Fixes - Bug in ``DataFrame(..).apply(to_numeric)`` when values are of type decimal.Decimal. (:issue:`14827`) - Bug in ``describe()`` when passing a numpy array which does not contain the median to the ``percentiles`` keyword argument (:issue:`14908`) - Bug in ``DataFrame.sort_values()`` when sorting by multiple columns where one column is of type ``int64`` and contains ``NaT`` (:issue:`14922`) +- Bug in ``DataFrame.reindex()`` in which ``method`` was ignored when passing ``columns`` (:issue:`14992`) - Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ba1e08ecc482f..7d2486b229e69 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2710,8 +2710,8 @@ def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, columns = axes['columns'] if columns is not None: - frame = frame._reindex_columns(columns, copy, level, fill_value, - limit, tolerance) + frame = frame._reindex_columns(columns, method, copy, level, + fill_value, limit, tolerance) index = axes['index'] if index is not None: @@ -2722,17 +2722,17 @@ def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, def _reindex_index(self, new_index, method, copy, level, fill_value=NA, limit=None, tolerance=None): - new_index, indexer = self.index.reindex(new_index, method, level, - limit=limit, + new_index, indexer = self.index.reindex(new_index, method=method, + level=level, limit=limit, tolerance=tolerance) return self._reindex_with_indexers({0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False) - def _reindex_columns(self, new_columns, copy, level, fill_value=NA, + def _reindex_columns(self, new_columns, method, 
copy, level, fill_value=NA, limit=None, tolerance=None): - new_columns, indexer = self.columns.reindex(new_columns, level=level, - limit=limit, + new_columns, indexer = self.columns.reindex(new_columns, method=method, + level=level, limit=limit, tolerance=tolerance) return self._reindex_with_indexers({1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index 56020e32b9963..e3662ea5effd5 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -571,8 +571,8 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan, new_series, index=index, columns=self.columns, default_fill_value=self._default_fill_value).__finalize__(self) - def _reindex_columns(self, columns, copy, level, fill_value, limit=None, - takeable=False): + def _reindex_columns(self, columns, method, copy, level, fill_value=None, + limit=None, takeable=False): if level is not None: raise TypeError('Reindex by level not supported for sparse') @@ -582,6 +582,9 @@ def _reindex_columns(self, columns, copy, level, fill_value, limit=None, if limit: raise NotImplementedError("'limit' argument is not supported") + if notnull(method): + raise NotImplementedError("'method' argument is not supported") + # TODO: fill value handling sdict = dict((k, v) for k, v in compat.iteritems(self) if k in columns) return self._constructor( diff --git a/pandas/sparse/tests/test_frame.py b/pandas/sparse/tests/test_frame.py index ab12099b5624d..83b6a89811ee6 100644 --- a/pandas/sparse/tests/test_frame.py +++ b/pandas/sparse/tests/test_frame.py @@ -799,6 +799,76 @@ def test_reindex_fill_value(self): exp = exp.to_sparse(self.zframe.default_fill_value) tm.assert_sp_frame_equal(result, exp) + def test_reindex_method(self): + + sparse = SparseDataFrame(data=[[11., 12., 14.], + [21., 22., 24.], + [41., 42., 44.]], + index=[1, 2, 4], + columns=[1, 2, 4], + dtype=float) + + # Over indices + + # default method + result = 
sparse.reindex(index=range(6)) + expected = SparseDataFrame(data=[[nan, nan, nan], + [11., 12., 14.], + [21., 22., 24.], + [nan, nan, nan], + [41., 42., 44.], + [nan, nan, nan]], + index=range(6), + columns=[1, 2, 4], + dtype=float) + tm.assert_sp_frame_equal(result, expected) + + # method='bfill' + result = sparse.reindex(index=range(6), method='bfill') + expected = SparseDataFrame(data=[[11., 12., 14.], + [11., 12., 14.], + [21., 22., 24.], + [41., 42., 44.], + [41., 42., 44.], + [nan, nan, nan]], + index=range(6), + columns=[1, 2, 4], + dtype=float) + tm.assert_sp_frame_equal(result, expected) + + # method='ffill' + result = sparse.reindex(index=range(6), method='ffill') + expected = SparseDataFrame(data=[[nan, nan, nan], + [11., 12., 14.], + [21., 22., 24.], + [21., 22., 24.], + [41., 42., 44.], + [41., 42., 44.]], + index=range(6), + columns=[1, 2, 4], + dtype=float) + tm.assert_sp_frame_equal(result, expected) + + # Over columns + + # default method + result = sparse.reindex(columns=range(6)) + expected = SparseDataFrame(data=[[nan, 11., 12., nan, 14., nan], + [nan, 21., 22., nan, 24., nan], + [nan, 41., 42., nan, 44., nan]], + index=[1, 2, 4], + columns=range(6), + dtype=float) + tm.assert_sp_frame_equal(result, expected) + + # method='bfill' + with tm.assertRaises(NotImplementedError): + sparse.reindex(columns=range(6), method='bfill') + + # method='ffill' + with tm.assertRaises(NotImplementedError): + sparse.reindex(columns=range(6), method='ffill') + def test_take(self): result = self.frame.take([1, 0, 2], axis=1) expected = self.frame.reindex(columns=['B', 'A', 'C']) diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 9da1b31d259c5..ecce17f96a672 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -297,6 +297,44 @@ def test_reindex_columns(self): newFrame = self.frame.reindex(columns=[]) self.assertTrue(newFrame.empty) + def 
test_reindex_columns_method(self): + + # GH 14992, reindexing over columns ignored method + df = DataFrame(data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]], + index=[1, 2, 4], + columns=[1, 2, 4], + dtype=float) + + # default method + result = df.reindex(columns=range(6)) + expected = DataFrame(data=[[np.nan, 11, 12, np.nan, 13, np.nan], + [np.nan, 21, 22, np.nan, 23, np.nan], + [np.nan, 31, 32, np.nan, 33, np.nan]], + index=[1, 2, 4], + columns=range(6), + dtype=float) + assert_frame_equal(result, expected) + + # method='ffill' + result = df.reindex(columns=range(6), method='ffill') + expected = DataFrame(data=[[np.nan, 11, 12, 12, 13, 13], + [np.nan, 21, 22, 22, 23, 23], + [np.nan, 31, 32, 32, 33, 33]], + index=[1, 2, 4], + columns=range(6), + dtype=float) + assert_frame_equal(result, expected) + + # method='bfill' + result = df.reindex(columns=range(6), method='bfill') + expected = DataFrame(data=[[11, 11, 12, 13, 13, np.nan], + [21, 21, 22, 23, 23, np.nan], + [31, 31, 32, 33, 33, np.nan]], + index=[1, 2, 4], + columns=range(6), + dtype=float) + assert_frame_equal(result, expected) + def test_reindex_axes(self): # GH 3317, reindexing by both axes loses freq of the index df = DataFrame(np.ones((3, 3)),
- [x] closes #14992 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14993
2016-12-26T20:47:20Z
2016-12-30T19:41:22Z
null
2017-09-21T15:17:33Z
BLD: move + update build script
diff --git a/build_dist.sh b/scripts/build_dist.sh similarity index 89% rename from build_dist.sh rename to scripts/build_dist.sh index 9bd007e3e1c9f..c9c36c18bed9c 100755 --- a/build_dist.sh +++ b/scripts/build_dist.sh @@ -12,7 +12,7 @@ case ${answer:0:1} in echo "Building distribution" python setup.py clean python setup.py build_ext --inplace - python setup.py sdist --formats=zip,gztar + python setup.py sdist --formats=gztar ;; * ) echo "Not building distribution"
Only one source dist format to upload since https://www.python.org/dev/peps/pep-0527/
https://api.github.com/repos/pandas-dev/pandas/pulls/14991
2016-12-26T13:47:02Z
2016-12-26T13:47:45Z
2016-12-26T13:47:45Z
2016-12-26T14:34:35Z
ERR: MultiIndex searchsorted
diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py index 132543e0e386c..1b1c30b8d0d6b 100644 --- a/pandas/indexes/multi.py +++ b/pandas/indexes/multi.py @@ -1470,6 +1470,9 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): return _ensure_platform_int(indexer) + def searchsorted(self, value, side='left', sorter=None): + raise NotImplementedError() + def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 4e7ace4173227..33f266954c767 100755 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -2448,6 +2448,13 @@ def test_iloc_mi(self): assert_frame_equal(result, expected) + def test_searchsorted(self): + # GH 14833 + # Test if MultiIndex searchsorted raises the NotImplementedException + df_mi = pd.MultiIndex([[0], ["a"]], [[0], [0]]) + with self.assertRaises(Exception) as context: + df_mi.searchsorted((1, "b")) + self.assertTrue(context.exception,NotImplementedError()) if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
- [ ] closes #14833 - [ ] tests added / passed - [ ] passes ``git diff upstream/master | flake8 --diff`` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14990
2016-12-26T11:45:50Z
2017-02-27T15:58:48Z
null
2023-05-11T01:14:50Z
MAINT: Abstract index helpers in pxi.in file
diff --git a/pandas/index.pyx b/pandas/index.pyx index a245e85d80f96..d575defe17422 100644 --- a/pandas/index.pyx +++ b/pandas/index.pyx @@ -363,115 +363,6 @@ cdef class IndexEngine: return result[0:count], missing[0:count_missing] -cdef class Int64Engine(IndexEngine): - - cdef _get_index_values(self): - return algos.ensure_int64(self.vgetter()) - - cdef _make_hash_table(self, n): - return _hash.Int64HashTable(n) - - def _call_monotonic(self, values): - return algos.is_monotonic_int64(values, timelike=False) - - def get_pad_indexer(self, other, limit=None): - return algos.pad_int64(self._get_index_values(), other, - limit=limit) - - def get_backfill_indexer(self, other, limit=None): - return algos.backfill_int64(self._get_index_values(), other, - limit=limit) - - cdef _check_type(self, object val): - hash(val) - if util.is_bool_object(val): - raise KeyError(val) - elif util.is_float_object(val): - raise KeyError(val) - - cdef _maybe_get_bool_indexer(self, object val): - cdef: - ndarray[uint8_t, cast=True] indexer - ndarray[int64_t] values - int count = 0 - Py_ssize_t i, n - int64_t ival - int last_true - - if not util.is_integer_object(val): - raise KeyError(val) - - ival = val - - values = self._get_index_values() - n = len(values) - - result = np.empty(n, dtype=bool) - indexer = result.view(np.uint8) - - for i in range(n): - if values[i] == val: - count += 1 - indexer[i] = 1 - last_true = i - else: - indexer[i] = 0 - - if count == 0: - raise KeyError(val) - if count == 1: - return last_true - - return result - -cdef class Float64Engine(IndexEngine): - - cdef _make_hash_table(self, n): - return _hash.Float64HashTable(n) - - cdef _get_index_values(self): - return algos.ensure_float64(self.vgetter()) - - cdef _maybe_get_bool_indexer(self, object val): - cdef: - ndarray[uint8_t] indexer - ndarray[float64_t] values - int count = 0 - Py_ssize_t i, n - int last_true - - values = self._get_index_values() - n = len(values) - - result = np.empty(n, dtype=bool) - indexer = 
result.view(np.uint8) - - for i in range(n): - if values[i] == val: - count += 1 - indexer[i] = 1 - last_true = i - else: - indexer[i] = 0 - - if count == 0: - raise KeyError(val) - if count == 1: - return last_true - - return result - - def _call_monotonic(self, values): - return algos.is_monotonic_float64(values, timelike=False) - - def get_pad_indexer(self, other, limit=None): - return algos.pad_float64(self._get_index_values(), other, - limit=limit) - - def get_backfill_indexer(self, other, limit=None): - return algos.backfill_float64(self._get_index_values(), other, - limit=limit) - cdef Py_ssize_t _bin_search(ndarray values, object val) except -1: cdef: @@ -510,22 +401,6 @@ _backfill_functions = { 'float64': algos.backfill_float64 } -cdef class ObjectEngine(IndexEngine): - - cdef _make_hash_table(self, n): - return _hash.PyObjectHashTable(n) - - def _call_monotonic(self, values): - return algos.is_monotonic_object(values, timelike=False) - - def get_pad_indexer(self, other, limit=None): - return algos.pad_object(self._get_index_values(), other, - limit=limit) - - def get_backfill_indexer(self, other, limit=None): - return algos.backfill_object(self._get_index_values(), other, - limit=limit) - cdef class DatetimeEngine(Int64Engine): @@ -668,3 +543,7 @@ cdef inline _to_i8(object val): cdef inline bint _is_utc(object tz): return tz is UTC or isinstance(tz, _du_utc) + + +# Generated from template. +include "index_class_helper.pxi" diff --git a/pandas/src/index_class_helper.pxi.in b/pandas/src/index_class_helper.pxi.in new file mode 100644 index 0000000000000..315dd18009ad4 --- /dev/null +++ b/pandas/src/index_class_helper.pxi.in @@ -0,0 +1,90 @@ +""" +Template for functions of IndexEngine subclasses. 
+ +WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in +""" + +#---------------------------------------------------------------------- +# IndexEngine Subclass Methods +#---------------------------------------------------------------------- + +{{py: + +# name, dtype, ctype +dtypes = [('Float64', 'float64', 'float64_t'), + ('Int64', 'int64', 'int64_t'), + ('Object', 'object', 'object')] +}} + +{{for name, dtype, ctype in dtypes}} + + +cdef class {{name}}Engine(IndexEngine): + + def _call_monotonic(self, values): + return algos.is_monotonic_{{dtype}}(values, timelike=False) + + def get_backfill_indexer(self, other, limit=None): + return algos.backfill_{{dtype}}(self._get_index_values(), + other, limit=limit) + + def get_pad_indexer(self, other, limit=None): + return algos.pad_{{dtype}}(self._get_index_values(), + other, limit=limit) + + cdef _make_hash_table(self, n): + {{if name == 'Object'}} + return _hash.PyObjectHashTable(n) + {{else}} + return _hash.{{name}}HashTable(n) + {{endif}} + + {{if name != 'Float64' and name != 'Object'}} + cdef _check_type(self, object val): + hash(val) + if util.is_bool_object(val): + raise KeyError(val) + elif util.is_float_object(val): + raise KeyError(val) + {{endif}} + + {{if name != 'Object'}} + cdef _get_index_values(self): + return algos.ensure_{{dtype}}(self.vgetter()) + + cdef _maybe_get_bool_indexer(self, object val): + cdef: + ndarray[uint8_t, cast=True] indexer + ndarray[{{ctype}}] values + int count = 0 + Py_ssize_t i, n + int last_true + + {{if name != 'Float64'}} + if not util.is_integer_object(val): + raise KeyError(val) + {{endif}} + + values = self._get_index_values() + n = len(values) + + result = np.empty(n, dtype=bool) + indexer = result.view(np.uint8) + + for i in range(n): + if values[i] == val: + count += 1 + indexer[i] = 1 + last_true = i + else: + indexer[i] = 0 + + if count == 0: + raise KeyError(val) + if count == 1: + return last_true + + return result + {{endif}} + +{{endfor}} diff 
--git a/setup.py b/setup.py index 0821a7d907e6c..0a84cf527bfb1 100755 --- a/setup.py +++ b/setup.py @@ -116,6 +116,7 @@ def is_platform_mac(): '_join': ['join_helper.pxi.in', 'joins_func_helper.pxi.in'], 'hashtable': ['hashtable_class_helper.pxi.in', 'hashtable_func_helper.pxi.in'], + 'index': ['index_class_helper.pxi.in'], '_sparse': ['sparse_op_helper.pxi.in'] } _pxifiles = []
Title is self-explanatory. xref #14937.
https://api.github.com/repos/pandas-dev/pandas/pulls/14989
2016-12-26T10:24:53Z
2016-12-26T17:17:57Z
2016-12-26T17:17:57Z
2016-12-26T18:33:44Z
CLN: used np.__version__ and removed instantiation
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index f2d837a4c9908..358ac3c30b8e7 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -7,7 +7,7 @@ # numpy versioning -_np_version = np.version.short_version +_np_version = np.__version__ _nlv = LooseVersion(_np_version) _np_version_under1p8 = _nlv < '1.8' _np_version_under1p9 = _nlv < '1.9' @@ -15,7 +15,7 @@ _np_version_under1p11 = _nlv < '1.11' _np_version_under1p12 = _nlv < '1.12' -if LooseVersion(_np_version) < '1.7.0': +if _nlv < '1.7.0': raise ImportError('this version of pandas is incompatible with ' 'numpy < 1.7.0\n' 'your numpy version is {0}.\n'
@rkern and others [recommend](http://stackoverflow.com/questions/1520234/how-to-check-which-version-of-numpy-im-using) using `np.__version__` instead of `np.version`. However I realize this is pandas "internals" so there may be some reason for using `np.version` over `np.__version__`. I could not find any such reason. I also removed a duplicate `LooseVersion(_np_version)` and reused the existing `_nlv` instance. - [x] doesn't close any issue, that I'm aware of - [x] I ran `nosetests .\pandas\tests\test_compat.py` and 5 tests passed - [x] couldn't get the `git` command to work, but `flake8 .\compat\numpy\__init__.py` passes fine - [x] whatsnew entry should be covered by `several new features, enhancements, and performance improvements` in the v0.20.0.txt file
https://api.github.com/repos/pandas-dev/pandas/pulls/14988
2016-12-26T03:42:19Z
2016-12-26T17:16:23Z
2016-12-26T17:16:23Z
2016-12-26T17:16:23Z
ENH: Added 'sum' to the set of statistics returned by df.field.describe()
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 77c2699f5a432..d58bc240790c0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5284,9 +5284,10 @@ def describe(self, percentiles=None, include=None, exclude=None): formatted_percentiles = format_percentiles(percentiles) def describe_numeric_1d(series): - stat_index = (['count', 'mean', 'std', 'min'] + + stat_index = (['count', 'sum', 'mean', 'std', 'min'] + formatted_percentiles + ['max']) - d = ([series.count(), series.mean(), series.std(), series.min()] + + d = ([series.count(), series.sum(), series.mean(), series.std(), + series.min()] + [series.quantile(x) for x in percentiles] + [series.max()]) return pd.Series(d, index=stat_index, name=series.name) diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 3500ce913462a..0f0b6735e1ddd 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -819,13 +819,15 @@ def test_describe_empty(self): result = pd.Series().describe() self.assertEqual(result['count'], 0) - self.assertTrue(result.drop('count').isnull().all()) + self.assertEqual(result['sum'], 0) + self.assertTrue(result.drop('count').drop("sum").isnull().all()) nanSeries = Series([np.nan]) nanSeries.name = 'NaN' result = nanSeries.describe() self.assertEqual(result['count'], 0) - self.assertTrue(result.drop('count').isnull().all()) + self.assertEqual(result['sum'], 0) + self.assertTrue(result.drop('count').drop("sum").isnull().all()) def test_describe_none(self): noneSeries = Series([None]) @@ -1017,28 +1019,29 @@ def test_describe_percentiles_formatting(self): # default result = df.describe().index - expected = Index(['count', 'mean', 'std', 'min', '25%', '50%', '75%', - 'max'], + expected = Index(['count', 'sum', 'mean', 'std', 'min', '25%', '50%', + '75%', 'max'], dtype='object') tm.assert_index_equal(result, expected) result = df.describe(percentiles=[0.0001, 0.0005, 0.001, 0.999, 0.9995, 0.9999]).index - expected = 
Index(['count', 'mean', 'std', 'min', '0.01%', '0.05%', - '0.1%', '50%', '99.9%', '99.95%', '99.99%', 'max'], + expected = Index(['count', 'sum', 'mean', 'std', 'min', '0.01%', + '0.05%', '0.1%', '50%', '99.9%', '99.95%', '99.99%', + 'max'], dtype='object') tm.assert_index_equal(result, expected) result = df.describe(percentiles=[0.00499, 0.005, 0.25, 0.50, 0.75]).index - expected = Index(['count', 'mean', 'std', 'min', '0.499%', '0.5%', - '25%', '50%', '75%', 'max'], + expected = Index(['count', 'sum', 'mean', 'std', 'min', '0.499%', + '0.5%', '25%', '50%', '75%', 'max'], dtype='object') tm.assert_index_equal(result, expected) result = df.describe(percentiles=[0.00499, 0.01001, 0.25, 0.50, 0.75]).index - expected = Index(['count', 'mean', 'std', 'min', '0.5%', '1.0%', + expected = Index(['count', 'sum', 'mean', 'std', 'min', '0.5%', '1.0%', '25%', '50%', '75%', 'max'], dtype='object') tm.assert_index_equal(result, expected) @@ -1085,7 +1088,7 @@ def test_describe_empty_int_columns(self): desc = df[df[0] < 0].describe() # works assert_series_equal(desc.xs('count'), Series([0, 0], dtype=float, name='count')) - self.assertTrue(isnull(desc.ix[1:]).all().all()) + self.assertTrue(isnull(desc.ix[2:]).all().all()) def test_describe_objects(self): df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']}) @@ -1119,9 +1122,10 @@ def test_describe_objects(self): # mix of time, str, numeric df['C3'] = [2, 4, 6, 8, 2] result = df.describe() - expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]}, - index=['count', 'mean', 'std', 'min', '25%', - '50%', '75%', 'max']) + expected = DataFrame({"C3": [5., 22., 4.4, 2.607681, 2., 2., 4., 6., + 8.]}, + index=['count', 'sum', 'mean', 'std', 'min', + '25%', '50%', '75%', 'max']) assert_frame_equal(result, expected) assert_frame_equal(df.describe(), df[['C3']].describe()) @@ -1171,7 +1175,7 @@ def test_describe_typefiltering(self): assert_frame_equal(desc, descN) desc = df.describe(percentiles=[], include='all') - cnt 
= Series(data=[4, 4, 6, 6, 6], + cnt = Series(data=[4, 4, 7, 7, 6], index=['catA', 'catB', 'numC', 'numD', 'ts']) assert_series_equal(desc.count(), cnt) self.assertTrue('count' in desc.index) @@ -1232,10 +1236,10 @@ def test_describe_typefiltering_groupby(self): 'numD': np.arange(24.) + .5, 'ts': tm.makeTimeSeries()[:24].index}) G = df.groupby('catA') - self.assertTrue(G.describe(include=['number']).shape == (16, 2)) - self.assertTrue(G.describe(include=['number', 'object']).shape == (22, + self.assertTrue(G.describe(include=['number']).shape == (18, 2)) + self.assertTrue(G.describe(include=['number', 'object']).shape == (24, 3)) - self.assertTrue(G.describe(include='all').shape == (26, 4)) + self.assertTrue(G.describe(include='all').shape == (28, 4)) def test_describe_multi_index_df_column_names(self): """ Test that column names persist after the describe operation."""
I've added ``sum`` to the set of statistics returned by the ``describe`` method. I believe it should be added because in my experience, the sum is just as commonly needed as the other values already returned by ``describe``. I have often found myself writing an additional line of code after ``describe`` to calculate the sum of the same series. Adding sum to the mix would remove that requirement and make life easier for myself and others. I believe I am not alone. For instance, ``sum`` is already included in the descriptive statistics methods of other programs, like Microsoft Excel. - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14986
2016-12-26T00:25:01Z
2016-12-26T17:12:46Z
null
2023-05-11T01:14:50Z
DOC: Refactor numeric index docs
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py index 1cc546629589d..b44c261833383 100644 --- a/pandas/indexes/base.py +++ b/pandas/indexes/base.py @@ -1128,17 +1128,17 @@ def is_mixed(self): def holds_integer(self): return self.inferred_type in ['integer', 'mixed-integer'] - # validate / convert indexers - def _convert_scalar_indexer(self, key, kind=None): - """ - convert a scalar indexer + _index_shared_docs['_convert_scalar_indexer'] = """ + Convert a scalar indexer. Parameters ---------- key : label of the slice bound kind : {'ix', 'loc', 'getitem', 'iloc'} or None - """ + """ + @Appender(_index_shared_docs['_convert_scalar_indexer']) + def _convert_scalar_indexer(self, key, kind=None): assert kind in ['ix', 'loc', 'getitem', 'iloc', None] if kind == 'iloc': @@ -1173,15 +1173,20 @@ def _convert_scalar_indexer(self, key, kind=None): return key - def _convert_slice_indexer(self, key, kind=None): - """ - convert a slice indexer. disallow floats in the start/stop/step + _index_shared_docs['_convert_slice_indexer'] = """ + Convert a slice indexer. + + By definition, these are labels unless 'iloc' is passed in. + Floats are not allowed as the start, step, or stop of the slice. Parameters ---------- key : label of the slice bound kind : {'ix', 'loc', 'getitem', 'iloc'} or None - """ + """ + + @Appender(_index_shared_docs['_convert_slice_indexer']) + def _convert_slice_indexer(self, key, kind=None): assert kind in ['ix', 'loc', 'getitem', 'iloc', None] # if we are not a slice, then we are done @@ -2102,9 +2107,8 @@ def _get_unique_index(self, dropna=False): return self._shallow_copy(values) - def get_loc(self, key, method=None, tolerance=None): - """ - Get integer location for requested label + _index_shared_docs['get_loc'] = """ + Get integer location for requested label. 
Parameters ---------- @@ -2125,7 +2129,10 @@ def get_loc(self, key, method=None, tolerance=None): Returns ------- loc : int if unique index, possibly slice or mask if not - """ + """ + + @Appender(_index_shared_docs['get_loc']) + def get_loc(self, key, method=None, tolerance=None): if method is None: if tolerance is not None: raise ValueError('tolerance argument only valid if using pad, ' @@ -3047,8 +3054,7 @@ def _validate_indexer(self, form, key, kind): self._invalid_indexer(form, key) return key - def _maybe_cast_slice_bound(self, label, side, kind): - """ + _index_shared_docs['_maybe_cast_slice_bound'] = """ This function should be overloaded in subclasses that allow non-trivial casting on label-slice bounds, e.g. datetime-like indices allowing strings containing formatted datetimes. @@ -3068,6 +3074,9 @@ def _maybe_cast_slice_bound(self, label, side, kind): Value of `side` parameter should be validated in caller. """ + + @Appender(_index_shared_docs['_maybe_cast_slice_bound']) + def _maybe_cast_slice_bound(self, label, side, kind): assert kind in ['ix', 'loc', 'getitem', None] # We are a plain index here (sub-class override this method if they diff --git a/pandas/indexes/numeric.py b/pandas/indexes/numeric.py index 97f7093e99064..c71abe202226e 100644 --- a/pandas/indexes/numeric.py +++ b/pandas/indexes/numeric.py @@ -16,6 +16,9 @@ import pandas.indexes.base as ibase +_num_index_shared_docs = dict() + + class NumericIndex(Index): """ Provide numeric type operations @@ -47,27 +50,8 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, name = data.name return cls._simple_new(subarr, name=name) + @Appender(_index_shared_docs['_maybe_cast_slice_bound']) def _maybe_cast_slice_bound(self, label, side, kind): - """ - This function should be overloaded in subclasses that allow non-trivial - casting on label-slice bounds, e.g. datetime-like indices allowing - strings containing formatted datetimes. 
- - Parameters - ---------- - label : object - side : {'left', 'right'} - kind : {'ix', 'loc', 'getitem'} - - Returns - ------- - label : object - - Notes - ----- - Value of `side` parameter should be validated in caller. - - """ assert kind in ['ix', 'loc', 'getitem', None] # we will try to coerce to integers @@ -90,27 +74,37 @@ def _assert_safe_casting(cls, data, subarr): pass -class Int64Index(NumericIndex): - """ +_num_index_shared_docs['class_descr'] = """ Immutable ndarray implementing an ordered, sliceable set. The basic object - storing axis labels for all pandas objects. Int64Index is a special case - of `Index` with purely integer labels. This is the default index type used - by the DataFrame and Series ctors when no explicit index is provided by the - user. + storing axis labels for all pandas objects. %(klass)s is a special case + of `Index` with purely %(ltype)s labels. %(extra)s Parameters ---------- data : array-like (1-dimensional) - dtype : NumPy dtype (default: int64) + dtype : NumPy dtype (default: %(dtype)s) copy : bool Make a copy of input ndarray name : object Name to be stored in the index - Notes ----- - An Index instance can **only** contain hashable objects - """ + An Index instance can **only** contain hashable objects. +""" + +_int64_descr_args = dict( + klass='Int64Index', + ltype='integer', + dtype='int64', + extra="""This is the default index type used + by the DataFrame and Series ctors when no explicit + index is provided by the user. 
+""" +) + + +class Int64Index(NumericIndex): + __doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args _typ = 'int64index' _arrmap = _algos.arrmap_int64 @@ -141,16 +135,8 @@ def is_all_dates(self): """ return False + @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): - """ - convert a scalar indexer - - Parameters - ---------- - key : label of the slice bound - kind : {'ix', 'loc', 'getitem'} or None - """ - assert kind in ['ix', 'loc', 'getitem', 'iloc', None] # don't coerce ilocs to integers @@ -177,25 +163,16 @@ def _assert_safe_casting(cls, data, subarr): Int64Index._add_logical_methods() -class Float64Index(NumericIndex): - """ - Immutable ndarray implementing an ordered, sliceable set. The basic object - storing axis labels for all pandas objects. Float64Index is a special case - of `Index` with purely floating point labels. +_float64_descr_args = dict( + klass='Float64Index', + dtype='float64', + ltype='float', + extra='' +) - Parameters - ---------- - data : array-like (1-dimensional) - dtype : NumPy dtype (default: object) - copy : bool - Make a copy of input ndarray - name : object - Name to be stored in the index - Notes - ----- - An Float64Index instance can **only** contain hashable objects - """ +class Float64Index(NumericIndex): + __doc__ = _num_index_shared_docs['class_descr'] % _float64_descr_args _typ = 'float64index' _engine_type = _index.Float64Engine @@ -228,6 +205,7 @@ def astype(self, dtype, copy=True): self.__class__) return Index(values, name=self.name, dtype=dtype) + @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): """ convert a scalar indexer @@ -245,17 +223,8 @@ def _convert_scalar_indexer(self, key, kind=None): return key + @Appender(_index_shared_docs['_convert_slice_indexer']) def _convert_slice_indexer(self, key, kind=None): - """ - convert a slice indexer, by definition these are labels - unless we are iloc 
- - Parameters - ---------- - key : label of the slice bound - kind : optional, type of the indexing operation (loc/ix/iloc/None) - """ - # if we are not a slice, then we are done if not isinstance(key, slice): return key @@ -325,6 +294,7 @@ def __contains__(self, other): except: return False + @Appender(_index_shared_docs['get_loc']) def get_loc(self, key, method=None, tolerance=None): try: if np.all(np.isnan(key)):
Refactor `NumericIndex` docs to avoid duplicate documentation. xref #14937.
https://api.github.com/repos/pandas-dev/pandas/pulls/14985
2016-12-25T23:48:25Z
2016-12-30T19:05:34Z
null
2016-12-30T19:23:46Z
BUG: Avoid flaky usecols set in C engine
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 0873e4b34b0b1..1341ce2710f57 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -321,3 +321,4 @@ Bug Fixes - Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`) - Bug in converting object elements of array-like objects to unsigned 64-bit integers (:issue:`4471`) - Bug in ``pd.pivot_table()`` where no error was raised when values argument was not in the columns (:issue:`14938`) +- Bug in ``pd.read_csv()`` for the C engine where ``usecols`` were being indexed incorrectly with ``parse_dates`` (:issue:`14792`) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index fdd753d1870b9..2332a9ade93ff 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -987,24 +987,42 @@ def _evaluate_usecols(usecols, names): def _validate_usecols_arg(usecols): """ - Check whether or not the 'usecols' parameter - contains all integers (column selection by index), - strings (column by name) or is a callable. Raises - a ValueError if that is not the case. + Validate the 'usecols' parameter. + + Checks whether or not the 'usecols' parameter contains all integers + (column selection by index), strings (column by name) or is a callable. + Raises a ValueError if that is not the case. + + Parameters + ---------- + usecols : array-like, callable, or None + List of columns to use when parsing or a callable that can be used + to filter a list of table columns. + + Returns + ------- + usecols_tuple : tuple + A tuple of (verified_usecols, usecols_dtype). + + 'verified_usecols' is either a set if an array-like is passed in or + 'usecols' if a callable or None is passed in. + + 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like + is passed in or None if a callable or None is passed in. 
""" msg = ("'usecols' must either be all strings, all unicode, " "all integers or a callable") if usecols is not None: if callable(usecols): - return usecols + return usecols, None usecols_dtype = lib.infer_dtype(usecols) if usecols_dtype not in ('empty', 'integer', 'string', 'unicode'): raise ValueError(msg) - return set(usecols) - return usecols + return set(usecols), usecols_dtype + return usecols, None def _validate_parse_dates_arg(parse_dates): @@ -1473,7 +1491,8 @@ def __init__(self, src, **kwds): self._reader = _parser.TextReader(src, **kwds) # XXX - self.usecols = _validate_usecols_arg(self._reader.usecols) + self.usecols, self.usecols_dtype = _validate_usecols_arg( + self._reader.usecols) passed_names = self.names is None @@ -1549,12 +1568,29 @@ def close(self): pass def _set_noconvert_columns(self): + """ + Set the columns that should not undergo dtype conversions. + + Currently, any column that is involved with date parsing will not + undergo such conversions. + """ names = self.orig_names - usecols = self.usecols + if self.usecols_dtype == 'integer': + # A set of integers will be converted to a list in + # the correct order every single time. + usecols = list(self.usecols) + elif (callable(self.usecols) or + self.usecols_dtype not in ('empty', None)): + # The names attribute should have the correct columns + # in the proper order for indexing with parse_dates. + usecols = self.names[:] + else: + # Usecols is empty. 
+ usecols = None def _set(x): - if usecols and is_integer(x): - x = list(usecols)[x] + if usecols is not None and is_integer(x): + x = usecols[x] if not is_integer(x): x = names.index(x) @@ -1792,7 +1828,7 @@ def __init__(self, f, **kwds): self.skipinitialspace = kwds['skipinitialspace'] self.lineterminator = kwds['lineterminator'] self.quoting = kwds['quoting'] - self.usecols = _validate_usecols_arg(kwds['usecols']) + self.usecols, _ = _validate_usecols_arg(kwds['usecols']) self.skip_blank_lines = kwds['skip_blank_lines'] self.names_passed = kwds['names'] or None diff --git a/pandas/io/tests/parser/usecols.py b/pandas/io/tests/parser/usecols.py index 26b4b5b8ec7d1..4fb6ff00e2d7b 100644 --- a/pandas/io/tests/parser/usecols.py +++ b/pandas/io/tests/parser/usecols.py @@ -200,6 +200,31 @@ def test_usecols_with_parse_dates(self): parse_dates=parse_dates) tm.assert_frame_equal(df, expected) + # See gh-14792 + s = """a,b,c,d,e,f,g,h,i,j + 2016/09/21,1,1,2,3,4,5,6,7,8""" + parse_dates = [0] + usecols = list('abcdefghij') + cols = {'a': Timestamp('2016-09-21'), + 'b': [1], 'c': [1], 'd': [2], + 'e': [3], 'f': [4], 'g': [5], + 'h': [6], 'i': [7], 'j': [8]} + expected = DataFrame(cols, columns=usecols) + df = self.read_csv(StringIO(s), usecols=usecols, + parse_dates=parse_dates) + tm.assert_frame_equal(df, expected) + + s = """a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8""" + parse_dates = [[0, 1]] + usecols = list('abcdefghij') + cols = {'a_b': '2016/09/21 1', + 'c': [1], 'd': [2], 'e': [3], 'f': [4], + 'g': [5], 'h': [6], 'i': [7], 'j': [8]} + expected = DataFrame(cols, columns=['a_b'] + list('cdefghij')) + df = self.read_csv(StringIO(s), usecols=usecols, + parse_dates=parse_dates) + tm.assert_frame_equal(df, expected) + def test_usecols_with_parse_dates_and_full_names(self): # See gh-9755 s = """0,1,20140101,0900,4
Explanation of the bug can be found <a href="https://github.com/pandas-dev/pandas/issues/14792#issuecomment-269115098">here</a>. Closes #14792.
https://api.github.com/repos/pandas-dev/pandas/pulls/14984
2016-12-25T19:42:36Z
2016-12-30T19:27:40Z
null
2016-12-30T19:28:46Z
Fixed power operation on indicies
diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py index 1cc546629589d..363870f43c0c0 100644 --- a/pandas/indexes/base.py +++ b/pandas/indexes/base.py @@ -3534,7 +3534,9 @@ def _evaluate_numeric_binop(self, other): operator.sub, '__sub__', reversed=True) cls.__mul__ = cls.__rmul__ = _make_evaluate_binop( operator.mul, '__mul__') - cls.__pow__ = cls.__rpow__ = _make_evaluate_binop( + cls.__rpow__ = _make_evaluate_binop( + operator.pow, '__pow__', reversed=False) + cls.__pow__ = _make_evaluate_binop( operator.pow, '__pow__') cls.__mod__ = _make_evaluate_binop( operator.mod, '__mod__')
Fixed power operation on indicies to allow for power on rows. See #14973 - [ ] closes #14973 - [ ] tests added / passed - [ ] passes ``git diff upstream/master | flake8 --diff`` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14981
2016-12-25T00:28:57Z
2016-12-27T05:38:47Z
null
2016-12-27T11:39:31Z
DOC: Adding repository pandas_exercises to tutorials.rst
diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst index c25e734a046b2..5ff4e955a9ed2 100644 --- a/doc/source/tutorials.rst +++ b/doc/source/tutorials.rst @@ -123,6 +123,33 @@ There are four sections covering selected topics as follows: - `Time Series <http://wavedatalab.github.io/datawithpython/timeseries.html>`_ +.. _tutorial-exercises-new-users: + +Exercises for New Users +----------------------- +Practice your skills with real data sets and exercises. +For more resources, please visit the main `repository <https://github.com/guipsamora/pandas_exercises>`_. + +- `01 - Getting & Knowing Your Data <https://github.com/guipsamora/pandas_exercises/tree/master/01_Getting_%26_Knowing_Your_Data>`_ + +- `02 - Filtering & Sorting <https://github.com/guipsamora/pandas_exercises/tree/master/02_Filtering_%26_Sorting>`_ + +- `03 - Grouping <https://github.com/guipsamora/pandas_exercises/tree/master/03_Grouping>`_ + +- `04 - Apply <https://github.com/guipsamora/pandas_exercises/tree/master/04_Apply>`_ + +- `05 - Merge <https://github.com/guipsamora/pandas_exercises/tree/master/05_Merge>`_ + +- `06 - Stats <https://github.com/guipsamora/pandas_exercises/tree/master/06_Stats>`_ + +- `07 - Visualization <https://github.com/guipsamora/pandas_exercises/tree/master/07_Visualization>`_ + +- `08 - Creating Series and DataFrames <https://github.com/guipsamora/pandas_exercises/tree/master/08_Creating_Series_and_DataFrames/Pokemon>`_ + +- `09 - Time Series <https://github.com/guipsamora/pandas_exercises/tree/master/09_Time_Series>`_ + +- `10 - Deleting <https://github.com/guipsamora/pandas_exercises/tree/master/10_Deleting>`_ + .. _tutorial-modern: Modern Pandas
https://api.github.com/repos/pandas-dev/pandas/pulls/14980
2016-12-24T19:26:05Z
2017-01-02T10:12:44Z
2017-01-02T10:12:44Z
2017-01-02T10:14:11Z
Clean up py36 build
diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run index b9d543f557d06..684dab333648a 100644 --- a/ci/requirements-3.6.run +++ b/ci/requirements-3.6.run @@ -1,3 +1,4 @@ python-dateutil pytz numpy +scipy diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 950ad53abe5e0..7eba32b4932d0 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2466,7 +2466,7 @@ def is_in_obj(gpr): "Defaulting to column but " "this will raise an ambiguity error in a " "future version") % gpr, - FutureWarning, stacklevel=2) + FutureWarning, stacklevel=5) in_axis, name, gpr = True, gpr, obj[gpr] exclusions.append(name) elif gpr in obj.index.names: @@ -4025,7 +4025,9 @@ def __iter__(self): sdata = self._get_sorted_data() if self.ngroups == 0: - raise StopIteration + # we are inside a generator, rather than raise StopIteration + # we merely return signal the end + return starts, ends = lib.generate_slices(self.slabels, self.ngroups) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 8e4246787ed5b..fdd753d1870b9 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -385,14 +385,20 @@ def _read(filepath_or_buffer, kwds): raise NotImplementedError("'nrows' and 'chunksize' cannot be used" " together yet.") elif nrows is not None: - data = parser.read(nrows) - parser.close() + try: + data = parser.read(nrows) + finally: + parser.close() return data + elif chunksize or iterator: return parser - data = parser.read() - parser.close() + try: + data = parser.read() + finally: + parser.close() + return data _parser_defaults = { diff --git a/pandas/io/stata.py b/pandas/io/stata.py index c35e07be2c31a..512a224555577 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -169,10 +169,13 @@ def read_stata(filepath_or_buffer, convert_dates=True, if iterator or chunksize: data = reader else: - data = reader.read() - reader.close() + try: + data = reader.read() + finally: + reader.close() return data + _date_formats = ["%tc", "%tC", "%td", 
"%d", "%tw", "%tm", "%tq", "%th", "%ty"] diff --git a/pandas/io/tests/parser/test_unsupported.py b/pandas/io/tests/parser/test_unsupported.py index ffd1cfa9a2538..64f31a11440d8 100644 --- a/pandas/io/tests/parser/test_unsupported.py +++ b/pandas/io/tests/parser/test_unsupported.py @@ -69,9 +69,9 @@ def test_c_engine(self): msg = 'Error tokenizing data' with tm.assertRaisesRegexp(ParserError, msg): - read_table(StringIO(text), sep='\s+') + read_table(StringIO(text), sep='\\s+') with tm.assertRaisesRegexp(ParserError, msg): - read_table(StringIO(text), engine='c', sep='\s+') + read_table(StringIO(text), engine='c', sep='\\s+') msg = "Only length-1 thousands markers supported" data = """A|B|C diff --git a/pandas/sparse/tests/test_array.py b/pandas/sparse/tests/test_array.py index bd896ae5b86d9..592926f8e821d 100644 --- a/pandas/sparse/tests/test_array.py +++ b/pandas/sparse/tests/test_array.py @@ -361,7 +361,7 @@ def test_astype(self): arr.astype('i8') arr = SparseArray([0, np.nan, 0, 1], fill_value=0) - msg = 'Cannot convert non-finite values \(NA or inf\) to integer' + msg = 'Cannot convert non-finite values \\(NA or inf\\) to integer' with tm.assertRaisesRegexp(ValueError, msg): arr.astype('i8') @@ -708,7 +708,7 @@ def test_cumsum(self): tm.assert_sp_array_equal(out, expected) axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid. 
- msg = "axis\(={axis}\) out of bounds".format(axis=axis) + msg = "axis\\(={axis}\\) out of bounds".format(axis=axis) with tm.assertRaisesRegexp(ValueError, msg): SparseArray(data).cumsum(axis=axis) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 43a108e9acc80..ca757a821910a 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -390,7 +390,7 @@ def test_astype_cast_nan_inf_int(self): # GH14265, check nan and inf raise error when converting to int types = [np.int32, np.int64] values = [np.nan, np.inf] - msg = 'Cannot convert non-finite values \(NA or inf\) to integer' + msg = 'Cannot convert non-finite values \\(NA or inf\\) to integer' for this_type in types: for this_val in values: diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 3eafbaf912797..f02b3168b233f 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -46,7 +46,7 @@ def test_astype_cast_nan_inf_int(self): # GH14265, check nan and inf raise error when converting to int types = [np.int32, np.int64] values = [np.nan, np.inf] - msg = 'Cannot convert non-finite values \(NA or inf\) to integer' + msg = 'Cannot convert non-finite values \\(NA or inf\\) to integer' for this_type in types: for this_val in values: diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py index 5e800c02c9509..e63cfcc8c0590 100644 --- a/pandas/tools/tests/test_pivot.py +++ b/pandas/tools/tests/test_pivot.py @@ -381,21 +381,26 @@ def _check_output(result, values_col, index=['A', 'B'], df = df.set_index(['JOB', 'NAME', 'YEAR', 'MONTH'], drop=False, append=False) - result = df.pivot_table(index=['JOB', 'NAME'], - columns=['YEAR', 'MONTH'], - values=['DAYS', 'SALARY'], - aggfunc={'DAYS': 'mean', 'SALARY': 'sum'}, - margins=True) - - expected = df.pivot_table(index=['JOB', 'NAME'], - columns=['YEAR', 'MONTH'], values=['DAYS'], - aggfunc='mean', margins=True) + 
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = df.pivot_table(index=['JOB', 'NAME'], + columns=['YEAR', 'MONTH'], + values=['DAYS', 'SALARY'], + aggfunc={'DAYS': 'mean', 'SALARY': 'sum'}, + margins=True) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + expected = df.pivot_table(index=['JOB', 'NAME'], + columns=['YEAR', 'MONTH'], + values=['DAYS'], + aggfunc='mean', margins=True) tm.assert_frame_equal(result['DAYS'], expected['DAYS']) - expected = df.pivot_table(index=['JOB', 'NAME'], - columns=['YEAR', 'MONTH'], values=['SALARY'], - aggfunc='sum', margins=True) + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + expected = df.pivot_table(index=['JOB', 'NAME'], + columns=['YEAR', 'MONTH'], + values=['SALARY'], + aggfunc='sum', margins=True) tm.assert_frame_equal(result['SALARY'], expected['SALARY'])
- remove some deprecation warnings - fix use of StopIteration in a generator - add scipy to build
https://api.github.com/repos/pandas-dev/pandas/pulls/14979
2016-12-24T17:59:37Z
2016-12-24T20:40:25Z
null
2016-12-24T20:40:25Z
BUG: applymap on empty DataFrame returns Series (#8222)
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 4799d2711231b..40bd8bc4154a6 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -237,6 +237,7 @@ Other API Changes - ``CParserError`` has been renamed to ``ParserError`` in ``pd.read_csv`` and will be removed in the future (:issue:`12665`) - ``SparseArray.cumsum()`` and ``SparseSeries.cumsum()`` will now always return ``SparseArray`` and ``SparseSeries`` respectively (:issue:`12855`) +- ``DataFrame.applymap()`` with an empty ``DataFrame`` will return a copy of the empty ``DataFrame`` instead of a ``Series`` (:issue:`8222`) .. _whatsnew_0200.deprecations: @@ -286,7 +287,6 @@ Bug Fixes - Bug in ``DataFrame.sort_values()`` when sorting by multiple columns where one column is of type ``int64`` and contains ``NaT`` (:issue:`14922`) - - Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`) - Bug in ``Series.ffill()`` with mixed dtypes containing tz-aware datetimes. 
(:issue:`14956`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7305df0f57736..ba1e08ecc482f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4288,6 +4288,8 @@ def applymap(self, func): # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): + if x.empty: + return lib.map_infer(x, func) return lib.map_infer(x.asobject, func) return self.apply(infer) diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index 5cadb4dba577f..9e68b7e76d78f 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -405,6 +405,16 @@ def test_applymap(self): for f in ['datetime', 'timedelta']: self.assertEqual(result.loc[0, f], str(df.loc[0, f])) + # GH 8222 + empty_frames = [pd.DataFrame(), + pd.DataFrame(columns=list('ABC')), + pd.DataFrame(index=list('ABC')), + pd.DataFrame({'A': [], 'B': [], 'C': []})] + for frame in empty_frames: + for func in [round, lambda x: x]: + result = frame.applymap(func) + tm.assert_frame_equal(result, frame) + def test_applymap_box(self): # ufunc will not be boxed. Same test cases as the test_map_box df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
- [x] closes #8222 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry for 0.20.0 Within `applymap`, the data is converted into a numpy array with `asobject` [here](https://github.com/pandas-dev/pandas/blob/master/pandas/core/frame.py#L4291) and evaluated with the `apply` method `_apply_empty_result` since this frame is empty. The numpy array is compared to an `Series` [here](https://github.com/pandas-dev/pandas/blob/master/pandas/core/frame.py#L4112) which proceeds to get returned as a `Series` [here](https://github.com/pandas-dev/pandas/blob/master/pandas/core/frame.py#L4118). Found that passing in the data without using `asobject` allows the logic to return a copy of the empty dataframe [here](https://github.com/pandas-dev/pandas/blob/master/pandas/core/frame.py#L4120).
https://api.github.com/repos/pandas-dev/pandas/pulls/14977
2016-12-24T04:18:10Z
2016-12-24T20:53:51Z
2016-12-24T20:53:51Z
2017-12-20T02:03:40Z
BUG: GH14882 Incorrect index label displayed on MultiIndex DataFrame
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 4799d2711231b..48375e9a8e0dd 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -318,3 +318,5 @@ Bug Fixes - Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`) - Bug in converting object elements of array-like objects to unsigned 64-bit integers (:issue:`4471`) - Bug in ``pd.pivot_table()`` where no error was raised when values argument was not in the columns (:issue:`14938`) +- Bug in HTML display with MultiIndex and truncation (:issue:`14882`) + diff --git a/pandas/formats/format.py b/pandas/formats/format.py index 0cf6050e515e0..211e61842c7a7 100644 --- a/pandas/formats/format.py +++ b/pandas/formats/format.py @@ -1248,6 +1248,7 @@ def _write_hierarchical_rows(self, fmt_values, indent): # Insert ... row and adjust idx_values and # level_lengths to take this into account. ins_row = self.fmt.tr_row_num + inserted = False for lnum, records in enumerate(level_lengths): rec_new = {} for tag, span in list(records.items()): @@ -1255,9 +1256,17 @@ def _write_hierarchical_rows(self, fmt_values, indent): rec_new[tag + 1] = span elif tag + span > ins_row: rec_new[tag] = span + 1 - dot_row = list(idx_values[ins_row - 1]) - dot_row[-1] = u('...') - idx_values.insert(ins_row, tuple(dot_row)) + + # GH 14882 - Make sure insertion done once + if not inserted: + dot_row = list(idx_values[ins_row - 1]) + dot_row[-1] = u('...') + idx_values.insert(ins_row, tuple(dot_row)) + inserted = True + else: + dot_row = list(idx_values[ins_row]) + dot_row[inner_lvl - lnum] = u('...') + idx_values[ins_row] = tuple(dot_row) else: rec_new[tag] = span # If ins_row lies between tags, all cols idx cols @@ -1267,6 +1276,12 @@ def _write_hierarchical_rows(self, fmt_values, indent): if lnum == 0: idx_values.insert(ins_row, tuple( [u('...')] * len(level_lengths))) + + # GH 14882 - Place ... 
in correct level + elif inserted: + dot_row = list(idx_values[ins_row]) + dot_row[inner_lvl - lnum] = u('...') + idx_values[ins_row] = tuple(dot_row) level_lengths[lnum] = rec_new level_lengths[inner_lvl][ins_row] = 1 diff --git a/pandas/tests/formats/test_format.py b/pandas/tests/formats/test_format.py index e7c32a4baa4ea..f709d3e3e97ba 100644 --- a/pandas/tests/formats/test_format.py +++ b/pandas/tests/formats/test_format.py @@ -1214,6 +1214,554 @@ def test_to_html_multiindex_sparsify(self): self.assertEqual(result, expected) + # GH 14882 - Issue on truncation with odd length DataFrame + def test_to_html_multiindex_odd_even_truncate(self): + mi = MultiIndex.from_product([[100, 200, 300], + [10, 20, 30], + [1, 2, 3, 4, 5, 6, 7]], + names=['a','b','c']) + df = DataFrame({'n' : range(len(mi))}, index = mi) + result = df.to_html(max_rows=60) + expected = """\ +<table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th></th> + <th></th> + <th>n</th> + </tr> + <tr> + <th>a</th> + <th>b</th> + <th>c</th> + <th></th> + </tr> + </thead> + <tbody> + <tr> + <th rowspan="21" valign="top">100</th> + <th rowspan="7" valign="top">10</th> + <th>1</th> + <td>0</td> + </tr> + <tr> + <th>2</th> + <td>1</td> + </tr> + <tr> + <th>3</th> + <td>2</td> + </tr> + <tr> + <th>4</th> + <td>3</td> + </tr> + <tr> + <th>5</th> + <td>4</td> + </tr> + <tr> + <th>6</th> + <td>5</td> + </tr> + <tr> + <th>7</th> + <td>6</td> + </tr> + <tr> + <th rowspan="7" valign="top">20</th> + <th>1</th> + <td>7</td> + </tr> + <tr> + <th>2</th> + <td>8</td> + </tr> + <tr> + <th>3</th> + <td>9</td> + </tr> + <tr> + <th>4</th> + <td>10</td> + </tr> + <tr> + <th>5</th> + <td>11</td> + </tr> + <tr> + <th>6</th> + <td>12</td> + </tr> + <tr> + <th>7</th> + <td>13</td> + </tr> + <tr> + <th rowspan="7" valign="top">30</th> + <th>1</th> + <td>14</td> + </tr> + <tr> + <th>2</th> + <td>15</td> + </tr> + <tr> + <th>3</th> + <td>16</td> + </tr> + <tr> + <th>4</th> + <td>17</td> + </tr> 
+ <tr> + <th>5</th> + <td>18</td> + </tr> + <tr> + <th>6</th> + <td>19</td> + </tr> + <tr> + <th>7</th> + <td>20</td> + </tr> + <tr> + <th rowspan="19" valign="top">200</th> + <th rowspan="7" valign="top">10</th> + <th>1</th> + <td>21</td> + </tr> + <tr> + <th>2</th> + <td>22</td> + </tr> + <tr> + <th>3</th> + <td>23</td> + </tr> + <tr> + <th>4</th> + <td>24</td> + </tr> + <tr> + <th>5</th> + <td>25</td> + </tr> + <tr> + <th>6</th> + <td>26</td> + </tr> + <tr> + <th>7</th> + <td>27</td> + </tr> + <tr> + <th rowspan="5" valign="top">20</th> + <th>1</th> + <td>28</td> + </tr> + <tr> + <th>2</th> + <td>29</td> + </tr> + <tr> + <th>...</th> + <td>...</td> + </tr> + <tr> + <th>6</th> + <td>33</td> + </tr> + <tr> + <th>7</th> + <td>34</td> + </tr> + <tr> + <th rowspan="7" valign="top">30</th> + <th>1</th> + <td>35</td> + </tr> + <tr> + <th>2</th> + <td>36</td> + </tr> + <tr> + <th>3</th> + <td>37</td> + </tr> + <tr> + <th>4</th> + <td>38</td> + </tr> + <tr> + <th>5</th> + <td>39</td> + </tr> + <tr> + <th>6</th> + <td>40</td> + </tr> + <tr> + <th>7</th> + <td>41</td> + </tr> + <tr> + <th rowspan="21" valign="top">300</th> + <th rowspan="7" valign="top">10</th> + <th>1</th> + <td>42</td> + </tr> + <tr> + <th>2</th> + <td>43</td> + </tr> + <tr> + <th>3</th> + <td>44</td> + </tr> + <tr> + <th>4</th> + <td>45</td> + </tr> + <tr> + <th>5</th> + <td>46</td> + </tr> + <tr> + <th>6</th> + <td>47</td> + </tr> + <tr> + <th>7</th> + <td>48</td> + </tr> + <tr> + <th rowspan="7" valign="top">20</th> + <th>1</th> + <td>49</td> + </tr> + <tr> + <th>2</th> + <td>50</td> + </tr> + <tr> + <th>3</th> + <td>51</td> + </tr> + <tr> + <th>4</th> + <td>52</td> + </tr> + <tr> + <th>5</th> + <td>53</td> + </tr> + <tr> + <th>6</th> + <td>54</td> + </tr> + <tr> + <th>7</th> + <td>55</td> + </tr> + <tr> + <th rowspan="7" valign="top">30</th> + <th>1</th> + <td>56</td> + </tr> + <tr> + <th>2</th> + <td>57</td> + </tr> + <tr> + <th>3</th> + <td>58</td> + </tr> + <tr> + <th>4</th> + <td>59</td> + </tr> 
+ <tr> + <th>5</th> + <td>60</td> + </tr> + <tr> + <th>6</th> + <td>61</td> + </tr> + <tr> + <th>7</th> + <td>62</td> + </tr> + </tbody> +</table>""" + self.assertEqual(result, expected) + + # Test that ... appears in a middle level + result = df.to_html(max_rows=56) + expected = """\ +<table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th></th> + <th></th> + <th>n</th> + </tr> + <tr> + <th>a</th> + <th>b</th> + <th>c</th> + <th></th> + </tr> + </thead> + <tbody> + <tr> + <th rowspan="21" valign="top">100</th> + <th rowspan="7" valign="top">10</th> + <th>1</th> + <td>0</td> + </tr> + <tr> + <th>2</th> + <td>1</td> + </tr> + <tr> + <th>3</th> + <td>2</td> + </tr> + <tr> + <th>4</th> + <td>3</td> + </tr> + <tr> + <th>5</th> + <td>4</td> + </tr> + <tr> + <th>6</th> + <td>5</td> + </tr> + <tr> + <th>7</th> + <td>6</td> + </tr> + <tr> + <th rowspan="7" valign="top">20</th> + <th>1</th> + <td>7</td> + </tr> + <tr> + <th>2</th> + <td>8</td> + </tr> + <tr> + <th>3</th> + <td>9</td> + </tr> + <tr> + <th>4</th> + <td>10</td> + </tr> + <tr> + <th>5</th> + <td>11</td> + </tr> + <tr> + <th>6</th> + <td>12</td> + </tr> + <tr> + <th>7</th> + <td>13</td> + </tr> + <tr> + <th rowspan="7" valign="top">30</th> + <th>1</th> + <td>14</td> + </tr> + <tr> + <th>2</th> + <td>15</td> + </tr> + <tr> + <th>3</th> + <td>16</td> + </tr> + <tr> + <th>4</th> + <td>17</td> + </tr> + <tr> + <th>5</th> + <td>18</td> + </tr> + <tr> + <th>6</th> + <td>19</td> + </tr> + <tr> + <th>7</th> + <td>20</td> + </tr> + <tr> + <th rowspan="15" valign="top">200</th> + <th rowspan="7" valign="top">10</th> + <th>1</th> + <td>21</td> + </tr> + <tr> + <th>2</th> + <td>22</td> + </tr> + <tr> + <th>3</th> + <td>23</td> + </tr> + <tr> + <th>4</th> + <td>24</td> + </tr> + <tr> + <th>5</th> + <td>25</td> + </tr> + <tr> + <th>6</th> + <td>26</td> + </tr> + <tr> + <th>7</th> + <td>27</td> + </tr> + <tr> + <th>...</th> + <th>...</th> + <td>...</td> + </tr> + <tr> + <th 
rowspan="7" valign="top">30</th> + <th>1</th> + <td>35</td> + </tr> + <tr> + <th>2</th> + <td>36</td> + </tr> + <tr> + <th>3</th> + <td>37</td> + </tr> + <tr> + <th>4</th> + <td>38</td> + </tr> + <tr> + <th>5</th> + <td>39</td> + </tr> + <tr> + <th>6</th> + <td>40</td> + </tr> + <tr> + <th>7</th> + <td>41</td> + </tr> + <tr> + <th rowspan="21" valign="top">300</th> + <th rowspan="7" valign="top">10</th> + <th>1</th> + <td>42</td> + </tr> + <tr> + <th>2</th> + <td>43</td> + </tr> + <tr> + <th>3</th> + <td>44</td> + </tr> + <tr> + <th>4</th> + <td>45</td> + </tr> + <tr> + <th>5</th> + <td>46</td> + </tr> + <tr> + <th>6</th> + <td>47</td> + </tr> + <tr> + <th>7</th> + <td>48</td> + </tr> + <tr> + <th rowspan="7" valign="top">20</th> + <th>1</th> + <td>49</td> + </tr> + <tr> + <th>2</th> + <td>50</td> + </tr> + <tr> + <th>3</th> + <td>51</td> + </tr> + <tr> + <th>4</th> + <td>52</td> + </tr> + <tr> + <th>5</th> + <td>53</td> + </tr> + <tr> + <th>6</th> + <td>54</td> + </tr> + <tr> + <th>7</th> + <td>55</td> + </tr> + <tr> + <th rowspan="7" valign="top">30</th> + <th>1</th> + <td>56</td> + </tr> + <tr> + <th>2</th> + <td>57</td> + </tr> + <tr> + <th>3</th> + <td>58</td> + </tr> + <tr> + <th>4</th> + <td>59</td> + </tr> + <tr> + <th>5</th> + <td>60</td> + </tr> + <tr> + <th>6</th> + <td>61</td> + </tr> + <tr> + <th>7</th> + <td>62</td> + </tr> + </tbody> +</table>""" + self.assertEqual(result, expected) + def test_to_html_index_formatter(self): df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], columns=['foo', None], index=lrange(4))
- [x] closes #14882 - [x] tests added / passed Add `tests/formats/test_format.py:TestDataFrameFormatting.test_to_html_multiindex_odd_even_truncate` - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14975
2016-12-23T22:09:35Z
2017-01-21T23:07:44Z
null
2017-01-23T15:59:22Z
DOC: Fix typo in docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3678168890444..77c2699f5a432 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -690,7 +690,7 @@ def f(x): def rename_axis(self, mapper, axis=0, copy=True, inplace=False): """ Alter index and / or columns using input function or functions. - A scaler or list-like for ``mapper`` will alter the ``Index.name`` + A scalar or list-like for ``mapper`` will alter the ``Index.name`` or ``MultiIndex.names`` attribute. A function or dict for ``mapper`` will alter the labels. Function / dict values must be unique (1-to-1). Labels not contained in
scaler->scalar
https://api.github.com/repos/pandas-dev/pandas/pulls/14970
2016-12-23T00:35:04Z
2016-12-23T01:13:26Z
2016-12-23T01:13:26Z
2017-04-22T21:40:05Z
[Depr] raise_on_error kwarg with errors kwarg in astype#14878
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index e7b2fc5a6505d..4027edd6eb9eb 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -247,7 +247,7 @@ Deprecations - ``Index.repeat()`` and ``MultiIndex.repeat()`` have deprecated the ``n`` parameter in favor of ``repeats`` (:issue:`12662`) - ``Categorical.searchsorted()`` and ``Series.searchsorted()`` have deprecated the ``v`` parameter in favor of ``value`` (:issue:`12662`) - ``TimedeltaIndex.searchsorted()``, ``DatetimeIndex.searchsorted()``, and ``PeriodIndex.searchsorted()`` have deprecated the ``key`` parameter in favor of ``value`` (:issue:`12662`) - +- ``DataFrame.astype()`` has deprecated the ``raise_on_error`` parameter in favor of ``errors`` (:issue:`14878`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3678168890444..cd4b95ad48e0d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3061,7 +3061,9 @@ def blocks(self): """Internal property, property synonym for as_blocks()""" return self.as_blocks() - def astype(self, dtype, copy=True, raise_on_error=True, **kwargs): + @deprecate_kwarg(old_arg_name='raise_on_error', new_arg_name='errors', + mapping={True: 'raise', False: 'ignore'}) + def astype(self, dtype, copy=True, errors='raise', **kwargs): """ Cast object to input numpy.dtype Return a copy when copy = True (be really careful with this!) @@ -3073,7 +3075,15 @@ def astype(self, dtype, copy=True, raise_on_error=True, **kwargs): the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. - raise_on_error : raise on invalid input + errors : {'raise', 'ignore'}, default 'raise'. + Control raising of exceptions on invalid data for provided dtype. + + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + + .. 
versionadded:: 0.20.0 + + raise_on_error : DEPRECATED use ``errors`` instead kwargs : keyword arguments to pass on to the constructor Returns @@ -3086,7 +3096,7 @@ def astype(self, dtype, copy=True, raise_on_error=True, **kwargs): raise KeyError('Only the Series name can be used for ' 'the key in Series dtype mappings.') new_type = list(dtype.values())[0] - return self.astype(new_type, copy, raise_on_error, **kwargs) + return self.astype(new_type, copy, errors, **kwargs) elif self.ndim > 2: raise NotImplementedError( 'astype() only accepts a dtype arg of type dict when ' @@ -3107,8 +3117,8 @@ def astype(self, dtype, copy=True, raise_on_error=True, **kwargs): return concat(results, axis=1, copy=False) # else, only a single dtype is given - new_data = self._data.astype(dtype=dtype, copy=copy, - raise_on_error=raise_on_error, **kwargs) + new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors, + **kwargs) return self._constructor(new_data).__finalize__(self) def copy(self, deep=True): diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 05ac3356c1770..aa865ae430d4a 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -455,17 +455,23 @@ def downcast(self, dtypes=None, mgr=None): return blocks - def astype(self, dtype, copy=False, raise_on_error=True, values=None, - **kwargs): - return self._astype(dtype, copy=copy, raise_on_error=raise_on_error, - values=values, **kwargs) + def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs): + return self._astype(dtype, copy=copy, errors=errors, values=values, + **kwargs) - def _astype(self, dtype, copy=False, raise_on_error=True, values=None, + def _astype(self, dtype, copy=False, errors='raise', values=None, klass=None, mgr=None, **kwargs): """ Coerce to the new type (if copy=True, return a new copy) raise on an except if raise == True """ + errors_legal_values = ('raise', 'ignore') + + if errors not in errors_legal_values: + invalid_arg = ("Expected value of 
kwarg 'errors' to be one of {}. " + "Supplied value is '{}'".format( + list(errors_legal_values), errors)) + raise ValueError(invalid_arg) # may need to convert to categorical # this is only called for non-categoricals @@ -507,7 +513,7 @@ def _astype(self, dtype, copy=False, raise_on_error=True, values=None, newb = make_block(values, placement=self.mgr_locs, dtype=dtype, klass=klass) except: - if raise_on_error is True: + if errors == 'raise': raise newb = self.copy() if copy else self @@ -2147,7 +2153,7 @@ def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): return self.make_block_same_class(new_values, new_mgr_locs) - def _astype(self, dtype, copy=False, raise_on_error=True, values=None, + def _astype(self, dtype, copy=False, errors='raise', values=None, klass=None, mgr=None): """ Coerce to the new type (if copy=True, return a new copy) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 43a108e9acc80..95c5e7ea6e9fc 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -357,7 +357,7 @@ def test_astype_with_exclude_string(self): df = self.frame.copy() expected = self.frame.astype(int) df['string'] = 'foo' - casted = df.astype(int, raise_on_error=False) + casted = df.astype(int, errors='ignore') expected['string'] = 'foo' assert_frame_equal(casted, expected) @@ -365,7 +365,7 @@ def test_astype_with_exclude_string(self): df = self.frame.copy() expected = self.frame.astype(np.int32) df['string'] = 'foo' - casted = df.astype(np.int32, raise_on_error=False) + casted = df.astype(np.int32, errors='ignore') expected['string'] = 'foo' assert_frame_equal(casted, expected) @@ -523,6 +523,19 @@ def test_timedeltas(self): result = df.get_dtype_counts().sort_values() assert_series_equal(result, expected) + def test_arg_for_errors_in_astype(self): + # issue #14878 + + df = DataFrame([1, 2, 3]) + + with self.assertRaises(ValueError): + df.astype(np.float64, errors=True) + + with 
tm.assert_produces_warning(FutureWarning): + df.astype(np.int8, raise_on_error=False) + + df.astype(np.int8, errors='ignore') + class TestDataFrameDatetimeWithTZ(tm.TestCase, TestData): diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 3eafbaf912797..bf9c64276b693 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -168,3 +168,16 @@ def test_complexx(self): b.real = np.arange(5) + 5 tm.assert_numpy_array_equal(a + 5, b.real) tm.assert_numpy_array_equal(4 * a, b.imag) + + def test_arg_for_errors_in_astype(self): + # issue #14878 + + sr = Series([1, 2, 3]) + + with self.assertRaises(ValueError): + sr.astype(np.float64, errors=False) + + with tm.assert_produces_warning(FutureWarning): + sr.astype(np.int8, raise_on_error=True) + + sr.astype(np.int8, errors='raise') diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index db1c8da4cae73..32e8f44e6f258 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -553,7 +553,7 @@ def test_astype(self): 'e: f4; f: f2; g: f8') for t in ['float16', 'float32', 'float64', 'int32', 'int64']: t = np.dtype(t) - tmgr = mgr.astype(t, raise_on_error=False) + tmgr = mgr.astype(t, errors='ignore') self.assertEqual(tmgr.get('c').dtype.type, t) self.assertEqual(tmgr.get('e').dtype.type, t) self.assertEqual(tmgr.get('f').dtype.type, t)
- [x] closes #14878 - [x] tests passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry Please check that the entry in `whatsnew/v0.20.0.txt`. Unsure that the update was in the `_whatsnew_0200.deprecations` or `_whatsnew_0200.prior_deprecations` so put it in the former.
https://api.github.com/repos/pandas-dev/pandas/pulls/14967
2016-12-22T20:43:29Z
2017-01-03T20:53:59Z
2017-01-03T20:53:59Z
2017-01-04T10:13:23Z
[DOC] typo correction
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 6de6abed9a681..e09d240ed91b7 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -494,13 +494,13 @@ For example, let us consider ``Series`` object which index has minute resolution '2012-01-01 00:02:00'])) series_minute.index.resolution -A Timestamp string less accurate than a minute gives a ``Series`` object. +A timestamp string less accurate than a minute gives a ``Series`` object. .. ipython:: python series_minute['2011-12-31 23'] -A Timestamp string with minute resolution (or more accurate), gives a scalar instead, i.e. it is not casted to a slice. +A timestamp string with minute resolution (or more accurate), gives a scalar instead, i.e. it is not casted to a slice. .. ipython:: python
Follow-up to #14856. I believe that the word *timestamp* here should be de-capitalized at is not a name of object (like `Timestamp`), just a common noun.
https://api.github.com/repos/pandas-dev/pandas/pulls/14966
2016-12-22T20:21:10Z
2016-12-23T00:18:02Z
2016-12-23T00:18:02Z
2016-12-23T18:02:00Z
ERR: raise on missing values in pd.pivot_table
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index e7b2fc5a6505d..0b13d78f0377e 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -300,3 +300,4 @@ Bug Fixes - Bug in ``Series.unique()`` in which unsigned 64-bit integers were causing overflow (:issue:`14721`) - Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`) - Bug in converting object elements of array-like objects to unsigned 64-bit integers (:issue:`4471`) +- Bug in ``pd.pivot_table()`` where no error was raised when values argument was not in the columns (:issue:`14938`) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index f8d9d73590a60..f609de23d9189 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -5725,6 +5725,20 @@ def test_group_shift_with_null_key(self): assert_frame_equal(result, expected) + def test_pivot_table_values_key_error(self): + # This test is designed to replicate the error in issue #14938 + df = pd.DataFrame({'eventDate': + pd.date_range(pd.datetime.today(), + periods=20, freq='M').tolist(), + 'thename': range(0, 20)}) + + df['year'] = df.set_index('eventDate').index.year + df['month'] = df.set_index('eventDate').index.month + + with self.assertRaises(KeyError): + df.reset_index().pivot_table(index='year', columns='month', + values='badname', aggfunc='count') + def test_agg_over_numpy_arrays(self): # GH 3788 df = pd.DataFrame([[1, np.array([10, 20, 30])], diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py index 820a545363ee3..0f56b0b076897 100644 --- a/pandas/tools/pivot.py +++ b/pandas/tools/pivot.py @@ -107,6 +107,11 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', values_multi = False values = [values] + # GH14938 Make sure value labels are in data + for i in values: + if i not in data: + raise KeyError(i) + to_filter = [] for x in keys + values: 
if isinstance(x, Grouper):
- [x] closes #14938 - [x] tests added - for #14938 - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14965
2016-12-22T20:15:28Z
2016-12-23T20:49:57Z
2016-12-23T20:49:56Z
2016-12-23T20:52:00Z
DOC :Cheatsheet update.
diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pdf b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf new file mode 100644 index 0000000000000..a2b222c683564 Binary files /dev/null and b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf differ diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pptx b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx new file mode 100644 index 0000000000000..5202256006ddf Binary files /dev/null and b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx differ diff --git a/doc/cheatsheet/README.txt b/doc/cheatsheet/README.txt new file mode 100644 index 0000000000000..e2f6ec042e9cc --- /dev/null +++ b/doc/cheatsheet/README.txt @@ -0,0 +1,4 @@ +The Pandas Cheat Sheet was created using Microsoft Powerpoint 2013. +To create the PDF version, within Powerpoint, simply do a "Save As" +and pick "PDF' as the format. + diff --git a/doc/source/whatsnew/v0.19.2.txt b/doc/source/whatsnew/v0.19.2.txt index 0567a3c3fa2bb..f21c28d558896 100644 --- a/doc/source/whatsnew/v0.19.2.txt +++ b/doc/source/whatsnew/v0.19.2.txt @@ -76,3 +76,11 @@ Bug Fixes - Explicit check in ``to_stata`` and ``StataWriter`` for out-of-range values when writing doubles (:issue:`14618`) + +.. _whatsnew_0192.documentation: + +Documentation +~~~~~~~~~~~~~ + +- Added a Pandas Cheat Sheet (:issue:`13202`). Can be found `here <http://pandas.pydata.org/docs/Pandas_Cheat_Sheet.pdf>`__. +
Update to cheatsheet, from suggestions from a colleague. 1. Fix bug in example for outer join 2. Add head() and tail() and use space for nsmallest() 3. Formatting of logic table.
https://api.github.com/repos/pandas-dev/pandas/pulls/14963
2016-12-22T18:32:58Z
2016-12-23T00:00:28Z
null
2016-12-23T20:52:03Z
BUG: Series.ffill() with mixed dtypes containing tz-aware datetimes f…
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index e7b2fc5a6505d..54824faf2c8e8 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -300,3 +300,4 @@ Bug Fixes - Bug in ``Series.unique()`` in which unsigned 64-bit integers were causing overflow (:issue:`14721`) - Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`) - Bug in converting object elements of array-like objects to unsigned 64-bit integers (:issue:`4471`) +- Bug in Series.ffill() with mixed dtypes containing tz-aware datetimes. (:issue:`14956`) diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index 97d786bf82b7c..b6b13def193ff 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -859,7 +859,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, # we try to coerce datetime w/tz but must all have the same tz if seen_datetimetz: - if len(set([ getattr(val, 'tz', None) for val in objects ])) == 1: + if len(set([getattr(val, 'tzinfo', None) for val in objects])) == 1: from pandas import DatetimeIndex return DatetimeIndex(objects) seen_object = 1 diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index ed558275674c7..3c82e4ed82969 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -386,6 +386,12 @@ def test_ffill(self): ts[2] = np.NaN assert_series_equal(ts.ffill(), ts.fillna(method='ffill')) + def test_ffill_mixed_dtypes_without_missing_data(self): + # GH14956 + series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1]) + result = series.ffill() + assert_series_equal(series, result) + def test_bfill(self): ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5)) ts[2] = np.NaN diff --git a/pandas/tests/types/test_inference.py b/pandas/tests/types/test_inference.py index fb8f3ca0b5b58..43db84a5b8c62 100644 --- a/pandas/tests/types/test_inference.py +++ 
b/pandas/tests/types/test_inference.py @@ -11,6 +11,7 @@ import re from datetime import datetime, date, timedelta, time import numpy as np +import pytz import pandas as pd from pandas import lib, tslib @@ -275,6 +276,14 @@ def test_maybe_convert_objects_uint64(self): exp = np.array([2**63, -1], dtype=object) tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp) + def test_mixed_dtypes_remain_object_array(self): + # GH14956 + array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], + dtype=object) + result = lib.maybe_convert_objects(array) + tm.assert_numpy_array_equal(result, array) + self.assertTrue(result.dtype == object) + class TestTypeInference(tm.TestCase): _multiprocess_can_split_ = True
…ails. (GH14956) Seems to work with all the datetime classes usually encountered, although 'tz' seems to be the idiom in the codebase (not sure why?). If both need to be supported I can replace `getattr(val, 'tzinfo', None)` with `getattr(val, 'tz', None) or getattr(val, 'tzinfo', None)`, thus also giving precedence to the former (if available). (breaking commit was 4de83d25d751d8ca102867b2d46a5547c01d7248) - [ x] closes #14956 - [x] tests added / passed - [x ] passes ``git diff upstream/master | flake8 --diff`` - [x ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14960
2016-12-22T17:12:15Z
2016-12-23T20:59:22Z
null
2016-12-23T20:59:55Z
TST: matplotlib 2.0 fix in log limits for barplot (GH14808)
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 73119fec88198..6878ca0e1bc06 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -216,15 +216,22 @@ def test_bar_log(self): if not self.mpl_le_1_2_1: expected = np.hstack((1.0e-04, expected, 1.0e+01)) + if self.mpl_ge_2_0_0: + expected = np.hstack((1.0e-05, expected)) ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar') + ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001 ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001 - self.assertEqual(ax.get_ylim(), (0.001, ymax)) + res = ax.get_ylim() + self.assertAlmostEqual(res[0], ymin) + self.assertAlmostEqual(res[1], ymax) tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected) tm.close() ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh') - self.assertEqual(ax.get_xlim(), (0.001, ymax)) + res = ax.get_xlim() + self.assertAlmostEqual(res[0], ymin) + self.assertAlmostEqual(res[1], ymax) tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected) @slow
Closes #14808
https://api.github.com/repos/pandas-dev/pandas/pulls/14957
2016-12-22T14:06:45Z
2016-12-24T10:50:47Z
2016-12-24T10:50:47Z
2016-12-24T14:04:07Z
TST: Groupby.groups of datetimeindex (#11442)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index f8d9d73590a60..f86928ccec8bd 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -3933,6 +3933,27 @@ def test_groupby_groups_datetimeindex(self): groups = grouped.groups tm.assertIsInstance(list(groups.keys())[0], datetime) + # GH 11442 + index = pd.date_range('2015/01/01', periods=5, name='date') + df = pd.DataFrame({'A': [5, 6, 7, 8, 9], + 'B': [1, 2, 3, 4, 5]}, index=index) + result = df.groupby(level='date').groups + dates = ['2015-01-05', '2015-01-04', '2015-01-03', + '2015-01-02', '2015-01-01'] + expected = {pd.Timestamp(date): pd.DatetimeIndex([date], name='date') + for date in dates} + tm.assert_dict_equal(result, expected) + + grouped = df.groupby(level='date') + for date in dates: + result = grouped.get_group(date) + data = [[df.loc[date, 'A'], df.loc[date, 'B']]] + expected_index = pd.DatetimeIndex([date], name='date') + expected = pd.DataFrame(data, + columns=list('AB'), + index=expected_index) + tm.assert_frame_equal(result, expected) + def test_groupby_groups_datetimeindex_tz(self): # GH 3950 dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
- [x] closes #11442 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` Doesn't look a PR in 0.19.2 and 0.20.0 addressed this, but it works on master 0.19.1
https://api.github.com/repos/pandas-dev/pandas/pulls/14952
2016-12-22T06:45:33Z
2016-12-23T11:16:25Z
2016-12-23T11:16:24Z
2017-12-20T02:03:40Z
BUG: Patch maybe_convert_objects uint64 handling
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index 2f829417f9bb2..97d786bf82b7c 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -810,7 +810,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, floats[i] = <float64_t> val complexes[i] = <double complex> val if not seen_null: - seen_uint = seen_uint or (val > npy_int64_max) + seen_uint = seen_uint or (int(val) > npy_int64_max) seen_sint = seen_sint or (val < 0) if seen_uint and seen_sint: diff --git a/pandas/tests/types/test_inference.py b/pandas/tests/types/test_inference.py index f83ad51c2f648..fb8f3ca0b5b58 100644 --- a/pandas/tests/types/test_inference.py +++ b/pandas/tests/types/test_inference.py @@ -260,6 +260,13 @@ def test_maybe_convert_objects_uint64(self): exp = np.array([2**63], dtype=np.uint64) tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp) + # NumPy bug: can't compare uint64 to int64, as that + # results in both casting to float64, so we should + # make sure that this function is robust against it + arr = np.array([np.uint64(2**63)], dtype=object) + exp = np.array([2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp) + arr = np.array([2, -1], dtype=object) exp = np.array([2, -1], dtype=np.int64) tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
Makes method robust against known `numpy` bug that you can't compare `uint64` against `int64` because they are casted to `float64` during the comparison, causing truncation. xref #14937. Follow-up to #14916.
https://api.github.com/repos/pandas-dev/pandas/pulls/14951
2016-12-22T06:07:36Z
2016-12-22T11:27:30Z
2016-12-22T11:27:30Z
2016-12-22T14:50:34Z
ENH: GH14883: json_normalize now takes a user-specified separator
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 15566d207e31f..638044cee67bb 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -300,9 +300,9 @@ Other Enhancements - ``pd.DataFrame.plot`` now prints a title above each subplot if ``suplots=True`` and ``title`` is a list of strings (:issue:`14753`) - ``pd.Series.interpolate`` now supports timedelta as an index type with ``method='time'`` (:issue:`6424`) - ``Timedelta.isoformat`` method added for formatting Timedeltas as an `ISO 8601 duration`_. See the :ref:`Timedelta docs <timedeltas.isoformat>` (:issue:`15136`) -- ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`) - ``.select_dtypes()`` now allows the string 'datetimetz' to generically select datetimes with tz (:issue:`14910`) - The ``.to_latex()`` method will now accept ``multicolumn`` and ``multirow`` arguments to use the accompanying LaTeX enhancements + - ``pd.merge_asof()`` gained the option ``direction='backward'|'forward'|'nearest'`` (:issue:`14887`) - ``Series/DataFrame.asfreq()`` have gained a ``fill_value`` parameter, to fill missing values (:issue:`3715`). - ``Series/DataFrame.resample.asfreq`` have gained a ``fill_value`` parameter, to fill missing values during resampling (:issue:`3715`). @@ -313,11 +313,15 @@ Other Enhancements - ``pd.TimedeltaIndex`` now has a custom datetick formatter specifically designed for nanosecond level precision (:issue:`8711`) - ``pd.types.concat.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs <categorical.union>` for more information. 
-- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`) - ``pd.DataFrame.to_latex`` and ``pd.DataFrame.to_string`` now allow optional header aliases. (:issue:`15536`) - Re-enable the ``parse_dates`` keyword of ``read_excel`` to parse string columns as dates (:issue:`14326`) - Added ``.empty`` property to subclasses of ``Index``. (:issue:`15270`) +- ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`) +- ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`) +- ``pandas.io.json.json_normalize()`` has gained a ``sep`` option that accepts ``str`` to separate joined fields; the default is ".", which is backward compatible. (:issue:`14883`) + + .. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index 4da4a6ad57850..518e0bc2064e2 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -21,7 +21,7 @@ def _convert_to_line_delimits(s): return convert_json_to_lines(s) -def nested_to_record(ds, prefix="", level=0): +def nested_to_record(ds, prefix="", sep=".", level=0): """a simplified json_normalize converts a nested dict into a flat dict ("record"), unlike json_normalize, @@ -31,6 +31,12 @@ def nested_to_record(ds, prefix="", level=0): ---------- ds : dict or list of dicts prefix: the prefix, optional, default: "" + sep : string, default '.' + Nested records will generate names separated by sep, + e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar + + .. versionadded:: 0.20.0 + level: the number of levels in the jason string, optional, default: 0 Returns @@ -66,7 +72,7 @@ def nested_to_record(ds, prefix="", level=0): if level == 0: newkey = k else: - newkey = prefix + '.' 
+ k + newkey = prefix + sep + k # only dicts gets recurse-flattend # only at level>1 do we rename the rest of the keys @@ -77,7 +83,7 @@ def nested_to_record(ds, prefix="", level=0): continue else: v = new_d.pop(k) - new_d.update(nested_to_record(v, newkey, level + 1)) + new_d.update(nested_to_record(v, newkey, sep, level + 1)) new_ds.append(new_d) if singleton: @@ -88,7 +94,8 @@ def nested_to_record(ds, prefix="", level=0): def json_normalize(data, record_path=None, meta=None, meta_prefix=None, record_prefix=None, - errors='raise'): + errors='raise', + sep='.'): """ "Normalize" semi-structured JSON data into a flat table @@ -106,13 +113,21 @@ def json_normalize(data, record_path=None, meta=None, path to records is ['foo', 'bar'] meta_prefix : string, default None errors : {'raise', 'ignore'}, default 'raise' - * 'ignore' : will ignore KeyError if keys listed in meta are not - always present - * 'raise' : will raise KeyError if keys listed in meta are not - always present + + * ignore : will ignore KeyError if keys listed in meta are not + always present + * raise : will raise KeyError if keys listed in meta are not + always present .. versionadded:: 0.20.0 + sep : string, default '.' + Nested records will generate names separated by sep, + e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar + + .. 
versionadded:: 0.20.0 + + Returns ------- frame : DataFrame @@ -173,7 +188,7 @@ def _pull_field(js, spec): # # TODO: handle record value which are lists, at least error # reasonably - data = nested_to_record(data) + data = nested_to_record(data, sep=sep) return DataFrame(data) elif not isinstance(record_path, list): record_path = [record_path] @@ -192,7 +207,9 @@ def _pull_field(js, spec): lengths = [] meta_vals = defaultdict(list) - meta_keys = ['.'.join(val) for val in meta] + if not isinstance(sep, compat.string_types): + sep = str(sep) + meta_keys = [sep.join(val) for val in meta] def _recursive_extract(data, path, seen_meta, level=0): if len(path) > 1: diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index f881f4dafe0f3..ee79859e9b71a 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -1,36 +1,60 @@ -from pandas import DataFrame +import pytest import numpy as np import json import pandas.util.testing as tm -from pandas import compat +from pandas import compat, Index, DataFrame from pandas.io.json import json_normalize from pandas.io.json.normalize import nested_to_record -def _assert_equal_data(left, right): - if not left.columns.equals(right.columns): - left = left.reindex(columns=right.columns) +@pytest.fixture +def deep_nested(): + # deeply nested data + return [{'country': 'USA', + 'states': [{'name': 'California', + 'cities': [{'name': 'San Francisco', + 'pop': 12345}, + {'name': 'Los Angeles', + 'pop': 12346}] + }, + {'name': 'Ohio', + 'cities': [{'name': 'Columbus', + 'pop': 1234}, + {'name': 'Cleveland', + 'pop': 1236}]} + ] + }, + {'country': 'Germany', + 'states': [{'name': 'Bayern', + 'cities': [{'name': 'Munich', 'pop': 12347}] + }, + {'name': 'Nordrhein-Westfalen', + 'cities': [{'name': 'Duesseldorf', 'pop': 1238}, + {'name': 'Koeln', 'pop': 1239}]} + ] + } + ] - tm.assert_frame_equal(left, right) +@pytest.fixture +def state_data(): + return [ + 
{'counties': [{'name': 'Dade', 'population': 12345}, + {'name': 'Broward', 'population': 40000}, + {'name': 'Palm Beach', 'population': 60000}], + 'info': {'governor': 'Rick Scott'}, + 'shortname': 'FL', + 'state': 'Florida'}, + {'counties': [{'name': 'Summit', 'population': 1234}, + {'name': 'Cuyahoga', 'population': 1337}], + 'info': {'governor': 'John Kasich'}, + 'shortname': 'OH', + 'state': 'Ohio'}] -class TestJSONNormalize(tm.TestCase): - def setUp(self): - self.state_data = [ - {'counties': [{'name': 'Dade', 'population': 12345}, - {'name': 'Broward', 'population': 40000}, - {'name': 'Palm Beach', 'population': 60000}], - 'info': {'governor': 'Rick Scott'}, - 'shortname': 'FL', - 'state': 'Florida'}, - {'counties': [{'name': 'Summit', 'population': 1234}, - {'name': 'Cuyahoga', 'population': 1337}], - 'info': {'governor': 'John Kasich'}, - 'shortname': 'OH', - 'state': 'Ohio'}] +class TestJSONNormalize(object): def test_simple_records(self): recs = [{'a': 1, 'b': 2, 'c': 3}, @@ -43,21 +67,21 @@ def test_simple_records(self): tm.assert_frame_equal(result, expected) - def test_simple_normalize(self): - result = json_normalize(self.state_data[0], 'counties') - expected = DataFrame(self.state_data[0]['counties']) + def test_simple_normalize(self, state_data): + result = json_normalize(state_data[0], 'counties') + expected = DataFrame(state_data[0]['counties']) tm.assert_frame_equal(result, expected) - result = json_normalize(self.state_data, 'counties') + result = json_normalize(state_data, 'counties') expected = [] - for rec in self.state_data: + for rec in state_data: expected.extend(rec['counties']) expected = DataFrame(expected) tm.assert_frame_equal(result, expected) - result = json_normalize(self.state_data, 'counties', meta='state') + result = json_normalize(state_data, 'counties', meta='state') expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2]) tm.assert_frame_equal(result, expected) @@ -67,33 +91,30 @@ def test_empty_array(self): expected 
= DataFrame() tm.assert_frame_equal(result, expected) - def test_more_deeply_nested(self): - data = [{'country': 'USA', - 'states': [{'name': 'California', - 'cities': [{'name': 'San Francisco', - 'pop': 12345}, - {'name': 'Los Angeles', - 'pop': 12346}] - }, - {'name': 'Ohio', - 'cities': [{'name': 'Columbus', - 'pop': 1234}, - {'name': 'Cleveland', - 'pop': 1236}]} - ] - }, - {'country': 'Germany', - 'states': [{'name': 'Bayern', - 'cities': [{'name': 'Munich', 'pop': 12347}] - }, - {'name': 'Nordrhein-Westfalen', - 'cities': [{'name': 'Duesseldorf', 'pop': 1238}, - {'name': 'Koeln', 'pop': 1239}]} - ] - } - ] + def test_simple_normalize_with_separator(self, deep_nested): + # GH 14883 + result = json_normalize({'A': {'A': 1, 'B': 2}}) + expected = DataFrame([[1, 2]], columns=['A.A', 'A.B']) + tm.assert_frame_equal(result.reindex_like(expected), expected) + + result = json_normalize({'A': {'A': 1, 'B': 2}}, sep='_') + expected = DataFrame([[1, 2]], columns=['A_A', 'A_B']) + tm.assert_frame_equal(result.reindex_like(expected), expected) + + result = json_normalize({'A': {'A': 1, 'B': 2}}, sep=u'\u03c3') + expected = DataFrame([[1, 2]], columns=[u'A\u03c3A', u'A\u03c3B']) + tm.assert_frame_equal(result.reindex_like(expected), expected) + + result = json_normalize(deep_nested, ['states', 'cities'], + meta=['country', ['states', 'name']], + sep='_') + expected = Index(['name', 'pop', + 'country', 'states_name']).sort_values() + assert result.columns.sort_values().equals(expected) + + def test_more_deeply_nested(self, deep_nested): - result = json_normalize(data, ['states', 'cities'], + result = json_normalize(deep_nested, ['states', 'cities'], meta=['country', ['states', 'name']]) # meta_prefix={'states': 'state_'}) @@ -143,26 +164,26 @@ def test_meta_name_conflict(self): 'data': [{'foo': 'something', 'bar': 'else'}, {'foo': 'something2', 'bar': 'else2'}]}] - self.assertRaises(ValueError, json_normalize, data, - 'data', meta=['foo', 'bar']) + with 
pytest.raises(ValueError): + json_normalize(data, 'data', meta=['foo', 'bar']) result = json_normalize(data, 'data', meta=['foo', 'bar'], meta_prefix='meta') for val in ['metafoo', 'metabar', 'foo', 'bar']: - self.assertTrue(val in result) + assert val in result - def test_record_prefix(self): - result = json_normalize(self.state_data[0], 'counties') - expected = DataFrame(self.state_data[0]['counties']) + def test_record_prefix(self, state_data): + result = json_normalize(state_data[0], 'counties') + expected = DataFrame(state_data[0]['counties']) tm.assert_frame_equal(result, expected) - result = json_normalize(self.state_data, 'counties', + result = json_normalize(state_data, 'counties', meta='state', record_prefix='county_') expected = [] - for rec in self.state_data: + for rec in state_data: expected.extend(rec['counties']) expected = DataFrame(expected) expected = expected.rename(columns=lambda x: 'county_' + x)
- [x] closes #14883 - [ ] tests added / passed (added 3 tests: `test_simple_normalize_with_{default, user_specified, user_specified_unicode}_separator`) - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry (v0.20.0) However, this doesn't work, even after making the fixes suggested in https://github.com/pandas-dev/pandas/pull/14891. I thought replacing `'.'` in `meta_keys = ['.'.join(val) for val in meta]` with `sep` would do the trick. It doesn't, so I'm a little puzzled. Happy to take a suggestion.
https://api.github.com/repos/pandas-dev/pandas/pulls/14950
2016-12-22T00:05:26Z
2017-03-28T21:50:38Z
null
2017-03-28T22:07:01Z
added 'separator' argument to json_normalize
diff --git a/pandas/io/json.py b/pandas/io/json.py index 0a6b8af179e12..fa123d5783958 100644 --- a/pandas/io/json.py +++ b/pandas/io/json.py @@ -24,8 +24,8 @@ def to_json(path_or_buf, obj, orient=None, date_format='epoch', default_handler=None, lines=False): if lines and orient != 'records': - raise ValueError( - "'lines' keyword only valid when 'orient' is records") + raise ValueError( + "'lines' keyword only valid when 'orient' is records") if isinstance(obj, Series): s = SeriesWriter( @@ -726,8 +726,8 @@ def nested_to_record(ds, prefix="", level=0): def json_normalize(data, record_path=None, meta=None, meta_prefix=None, record_prefix=None, + separator='.', errors='raise'): - """ "Normalize" semi-structured JSON data into a flat table @@ -744,6 +744,9 @@ def json_normalize(data, record_path=None, meta=None, If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar'] meta_prefix : string, default None + separator : string, default '.' + Nested records will generate names separated by separator, + e.g., for separator='.', { 'foo' : { 'bar' : 0 } } -> foo.bar errors : {'raise', 'ignore'}, default 'raise' * ignore : will ignore KeyError if keys listed in meta are not always present @@ -828,7 +831,7 @@ def _pull_field(js, spec): lengths = [] meta_vals = defaultdict(list) - meta_keys = ['.'.join(val) for val in meta] + meta_keys = [separator.join(val) for val in meta] def _recursive_extract(data, path, seen_meta, level=0): if len(path) > 1:
- [x] closes #14883 - [ ] tests added / passed (added 3 tests: `test_simple_normalize_with_{default, user_specified, user_specified_unicode}_separator`) - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry (v0.20.0) However, this doesn't work, even after making the fixes suggested in https://github.com/pandas-dev/pandas/pull/14891. I thought replacing `'.'` in `meta_keys = ['.'.join(val) for val in meta]` with `sep` would do the trick. It doesn't, so I'm a little puzzled. Happy to take a suggestion.
https://api.github.com/repos/pandas-dev/pandas/pulls/14949
2016-12-21T23:39:58Z
2016-12-21T23:53:19Z
null
2016-12-22T00:06:02Z
Clarified error in read_sas method when buffer object provided withou…
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 27f8564ba16c2..b70b3c1cea2b5 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -271,6 +271,7 @@ Performance Improvements - Improved performance of ``pd.wide_to_long()`` (:issue:`14779`) - Increased performance of ``pd.factorize()`` by releasing the GIL with ``object`` dtype when inferred as strings (:issue:`14859`) +- When reading buffer object in ``read_sas()`` method without specified format, filepath string is inferred rather than buffer object. .. _whatsnew_0200.bug_fixes: diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index 081d780f71cb3..6f7e4c0d213bc 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -1,10 +1,12 @@ """ Read SAS sas7bdat or xport files. """ +from pandas import compat def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, chunksize=None, iterator=False): + """ Read SAS files stored as either XPORT or SAS7BDAT format files. @@ -29,8 +31,12 @@ def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, DataFrame if iterator=False and chunksize=None, else SAS7BDATReader or XportReader """ - if format is None: + buffer_error_msg = ("If this is a buffer object rather" + "than a string name, you must specify" + " a format string") + if not isinstance(filepath_or_buffer, compat.string_types): + raise TypeError(buffer_error_msg) try: fname = filepath_or_buffer.lower() if fname.endswith(".xpt"): diff --git a/pandas/io/tests/sas/test_sas.py b/pandas/io/tests/sas/test_sas.py new file mode 100644 index 0000000000000..2a55a2d68157c --- /dev/null +++ b/pandas/io/tests/sas/test_sas.py @@ -0,0 +1,11 @@ +import pandas.util.testing as tm +from pandas.compat import StringIO +from pandas import read_sas + + +class TestSas(tm.TestCase): + + def test_sas_buffer_format(self): + b = StringIO("") + with self.assertRaises(TypeError): + read_sas(b)
…t format - [x] closes #14947 - [x] tests added / passed - [X] passes ``git diff upstream/master | flake8 --diff`` - [X] whatsnew entry Added three lines to sasreader.py immediately following line 33 (if format==None:) to handle the case when a buffer object is provided without a format='sas7bdat' or format='xport' situation. Method otherwise works splendidly when a filepath is provided, but a buffer object fails. This is an issue when using sasreader directly on SFTP file objects. I am unaware of any bug request (and am happy to open one), but I came across this issue when using the library.
https://api.github.com/repos/pandas-dev/pandas/pulls/14947
2016-12-21T22:01:26Z
2017-01-09T18:33:06Z
null
2017-01-09T18:33:10Z
BUG: sorting with large float and multiple columns incorrect
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 83a70aa34fccf..9fa55964de1c1 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -246,6 +246,7 @@ Bug Fixes - Bug in ``DataFrame`` construction in which unsigned 64-bit integer elements were being converted to objects (:issue:`14881`) - Bug in ``astype()`` where ``inf`` values were incorrectly converted to integers. Now raises error now with ``astype()`` for Series and DataFrames (:issue:`14265`) - Bug in ``describe()`` when passing a numpy array which does not contain the median to the ``percentiles`` keyword argument (:issue:`14908`) +- Bug in ``sort_values()`` when sorting by multiple columns where one column is of type ``int64`` and contains ``NaT`` (:issue:`14922`) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 6bcd3776867b6..706cc9b0f026b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -343,7 +343,8 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None): table = hash_klass(size_hint or len(vals)) uniques = vec_klass() - labels = table.get_labels(vals, uniques, 0, na_sentinel, True) + check_nulls = not is_integer_dtype(values) + labels = table.get_labels(vals, uniques, 0, na_sentinel, check_nulls) labels = _ensure_platform_int(labels) diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py index b7a38e9e13ebd..579a4bf5d54d5 100644 --- a/pandas/tests/frame/test_sorting.py +++ b/pandas/tests/frame/test_sorting.py @@ -6,7 +6,7 @@ from pandas.compat import lrange from pandas import (DataFrame, Series, MultiIndex, Timestamp, - date_range) + date_range, NaT) from pandas.util.testing import (assert_series_equal, assert_frame_equal, @@ -491,3 +491,49 @@ def test_frame_column_inplace_sort_exception(self): cp = s.copy() cp.sort_values() # it works! 
+ + def test_sort_nat_values_in_int_column(self): + + # GH 14922: "sorting with large float and multiple columns incorrect" + + # cause was that the int64 value NaT was considered as "na". Which is + # only correct for datetime64 columns. + + int_values = (2, int(NaT)) + float_values = (2.0, -1.797693e308) + + df = DataFrame(dict(int=int_values, float=float_values), + columns=["int", "float"]) + + df_reversed = DataFrame(dict(int=int_values[::-1], + float=float_values[::-1]), + columns=["int", "float"], + index=[1, 0]) + + # NaT is not a "na" for int64 columns, so na_position must not + # influence the result: + df_sorted = df.sort_values(["int", "float"], na_position="last") + assert_frame_equal(df_sorted, df_reversed) + + df_sorted = df.sort_values(["int", "float"], na_position="first") + assert_frame_equal(df_sorted, df_reversed) + + # reverse sorting order + df_sorted = df.sort_values(["int", "float"], ascending=False) + assert_frame_equal(df_sorted, df) + + # and now check if NaT is still considered as "na" for datetime64 + # columns: + df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT], + float=float_values), columns=["datetime", "float"]) + + df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")], + float=float_values[::-1]), + columns=["datetime", "float"], + index=[1, 0]) + + df_sorted = df.sort_values(["datetime", "float"], na_position="first") + assert_frame_equal(df_sorted, df_reversed) + + df_sorted = df.sort_values(["datetime", "float"], na_position="last") + assert_frame_equal(df_sorted, df_reversed)
Fixes https://github.com/pandas-dev/pandas/issues/14922 Having the `int` equivalent of `NaT` in an `int64` column caused wrong sorting because this special value was considered as "missing value". - [ ] closes #xxxx - [X] tests added / passed - [X] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14944
2016-12-21T17:36:44Z
2016-12-23T21:04:14Z
null
2016-12-23T21:04:14Z
DOC: Pandas Cheat Sheet (GH13202)
diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pdf b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf new file mode 100644 index 0000000000000..a0bff02d45f91 Binary files /dev/null and b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf differ diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pptx b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx new file mode 100644 index 0000000000000..399edf84e7d1c Binary files /dev/null and b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx differ diff --git a/doc/cheatsheet/README.txt b/doc/cheatsheet/README.txt new file mode 100644 index 0000000000000..e2f6ec042e9cc --- /dev/null +++ b/doc/cheatsheet/README.txt @@ -0,0 +1,4 @@ +The Pandas Cheat Sheet was created using Microsoft Powerpoint 2013. +To create the PDF version, within Powerpoint, simply do a "Save As" +and pick "PDF' as the format. + diff --git a/doc/source/whatsnew/v0.19.2.txt b/doc/source/whatsnew/v0.19.2.txt index 0567a3c3fa2bb..f21c28d558896 100644 --- a/doc/source/whatsnew/v0.19.2.txt +++ b/doc/source/whatsnew/v0.19.2.txt @@ -76,3 +76,11 @@ Bug Fixes - Explicit check in ``to_stata`` and ``StataWriter`` for out-of-range values when writing doubles (:issue:`14618`) + +.. _whatsnew_0192.documentation: + +Documentation +~~~~~~~~~~~~~ + +- Added a Pandas Cheat Sheet (:issue:`13202`). Can be found `here <http://pandas.pydata.org/docs/Pandas_Cheat_Sheet.pdf>`__. +
- [x ] closes #13202 Pandas Cheat Sheet. Someone needs to figure out (or tell me the best way to do it!) how the PDF version gets integrated into the documentation tree.
https://api.github.com/repos/pandas-dev/pandas/pulls/14943
2016-12-21T16:50:47Z
2016-12-21T17:23:05Z
null
2016-12-21T17:24:42Z
ENH: Create and propagate UInt64Index
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 2a825edd0e98a..0e682874b4b73 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -91,6 +91,25 @@ support for bz2 compression in the python 2 c-engine improved (:issue:`14874`). df = pd.read_table(url, compression='bz2') # explicitly specify compression df.head(2) +.. _whatsnew_0200.enhancements.uint64_support: + +Pandas has significantly improved support for operations involving unsigned, +or purely non-negative, integers. Previously, handling these integers would +result in improper rounding or data-type casting, leading to incorrect results. +Notably, a new numerical index, ``UInt64Index``, has been created (:issue:`14937`) + +.. ipython:: python + + idx = pd.UInt64Index([1, 2, 3]) + df = pd.DataFrame({'A': ['a', 'b', 'c']}, index=idx) + df.index + +- Bug in converting object elements of array-like objects to unsigned 64-bit integers (:issue:`4471`, :issue:`14982`) +- Bug in ``Series.unique()`` in which unsigned 64-bit integers were causing overflow (:issue:`14721`) +- Bug in ``DataFrame`` construction in which unsigned 64-bit integer elements were being converted to objects (:issue:`14881`) +- Bug in ``pd.read_csv()`` in which unsigned 64-bit integer elements were being improperly converted to the wrong data types (:issue:`14983`) +- Bug in ``pd.unique()`` in which unsigned 64-bit integers were causing overflow (:issue:`14915`) + .. 
_whatsnew_0200.enhancements.other: Other enhancements @@ -298,8 +317,6 @@ Bug Fixes - Bug in ``Index`` power operations with reversed operands (:issue:`14973`) - Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`) -- Bug in ``DataFrame`` construction in which unsigned 64-bit integer elements were being converted to objects (:issue:`14881`) -- Bug in ``pd.read_csv()`` in which unsigned 64-bit integer elements were being improperly converted to the wrong data types (:issue:`14983`) - Bug in ``astype()`` where ``inf`` values were incorrectly converted to integers. Now raises error now with ``astype()`` for Series and DataFrames (:issue:`14265`) - Bug in ``DataFrame(..).apply(to_numeric)`` when values are of type decimal.Decimal. (:issue:`14827`) - Bug in ``describe()`` when passing a numpy array which does not contain the median to the ``percentiles`` keyword argument (:issue:`14908`) @@ -324,8 +341,6 @@ Bug Fixes -- Bug in ``Series.unique()`` in which unsigned 64-bit integers were causing overflow (:issue:`14721`) -- Bug in ``pd.unique()`` in which unsigned 64-bit integers were causing overflow (:issue:`14915`) @@ -350,7 +365,6 @@ Bug Fixes - Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`) -- Bug in converting object elements of array-like objects to unsigned 64-bit integers (:issue:`4471`, :issue:`14982`) - Bug in ``pd.pivot_table()`` where no error was raised when values argument was not in the columns (:issue:`14938`) - Bug in ``.to_json()`` where ``lines=True`` and contents (keys or values) contain escaped characters (:issue:`15096`) @@ -369,4 +383,4 @@ Bug Fixes - Bug in ``Series`` constructor when both ``copy=True`` and ``dtype`` arguments are provided (:issue:`15125`) - Bug in ``pd.read_csv()`` for the C engine where ``usecols`` were being indexed incorrectly with ``parse_dates`` (:issue:`14792`) -- Bug in ``Series.dt.round`` inconsistent behaviour on NAT's with 
different arguments (:issue:`14940`) \ No newline at end of file +- Bug in ``Series.dt.round`` inconsistent behaviour on NAT's with different arguments (:issue:`14940`) diff --git a/pandas/api/tests/test_api.py b/pandas/api/tests/test_api.py index b13b4d7de60ca..78dfe46914200 100644 --- a/pandas/api/tests/test_api.py +++ b/pandas/api/tests/test_api.py @@ -53,7 +53,7 @@ class TestPDApi(Base, tm.TestCase): classes = ['Categorical', 'CategoricalIndex', 'DataFrame', 'DateOffset', 'DatetimeIndex', 'ExcelFile', 'ExcelWriter', 'Float64Index', 'Grouper', 'HDFStore', 'Index', 'Int64Index', 'MultiIndex', - 'Period', 'PeriodIndex', 'RangeIndex', + 'Period', 'PeriodIndex', 'RangeIndex', 'UInt64Index', 'Series', 'SparseArray', 'SparseDataFrame', 'SparseSeries', 'TimeGrouper', 'Timedelta', 'TimedeltaIndex', 'Timestamp'] diff --git a/pandas/core/api.py b/pandas/core/api.py index b5e1de2063c7e..177e7b31cbd4f 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -10,7 +10,8 @@ from pandas.core.groupby import Grouper from pandas.formats.format import set_eng_float_format from pandas.core.index import (Index, CategoricalIndex, Int64Index, - RangeIndex, Float64Index, MultiIndex) + UInt64Index, RangeIndex, Float64Index, + MultiIndex) from pandas.core.series import Series, TimeSeries from pandas.core.frame import DataFrame diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 6970d1891ee63..0db5103a18807 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -860,15 +860,20 @@ def _convert_for_reindex(self, key, axis=0): return labels[key] else: if isinstance(key, Index): - # want Index objects to pass through untouched - keyarr = key + keyarr = labels._convert_index_indexer(key) else: # asarray can be unsafe, NumPy strings are weird keyarr = _asarray_tuplesafe(key) - if is_integer_dtype(keyarr) and not labels.is_integer(): - keyarr = _ensure_platform_int(keyarr) - return labels.take(keyarr) + if is_integer_dtype(keyarr): + # Cast the indexer to 
uint64 if possible so + # that the values returned from indexing are + # also uint64. + keyarr = labels._convert_arr_indexer(keyarr) + + if not labels.is_integer(): + keyarr = _ensure_platform_int(keyarr) + return labels.take(keyarr) return keyarr @@ -1044,11 +1049,10 @@ def _getitem_iterable(self, key, axis=0): return self.obj.take(inds, axis=axis, convert=False) else: if isinstance(key, Index): - # want Index objects to pass through untouched - keyarr = key + keyarr = labels._convert_index_indexer(key) else: - # asarray can be unsafe, NumPy strings are weird keyarr = _asarray_tuplesafe(key) + keyarr = labels._convert_arr_indexer(keyarr) if is_categorical_dtype(labels): keyarr = labels._shallow_copy(keyarr) diff --git a/pandas/indexes/api.py b/pandas/indexes/api.py index 0b81c47488ef4..64992e46613e5 100644 --- a/pandas/indexes/api.py +++ b/pandas/indexes/api.py @@ -4,7 +4,7 @@ from pandas.indexes.category import CategoricalIndex # noqa from pandas.indexes.multi import MultiIndex # noqa from pandas.indexes.numeric import (NumericIndex, Float64Index, # noqa - Int64Index) + Int64Index, UInt64Index) from pandas.indexes.range import RangeIndex # noqa import pandas.core.common as com @@ -13,7 +13,7 @@ # TODO: there are many places that rely on these private methods existing in # pandas.core.index __all__ = ['Index', 'MultiIndex', 'NumericIndex', 'Float64Index', 'Int64Index', - 'CategoricalIndex', 'RangeIndex', + 'CategoricalIndex', 'RangeIndex', 'UInt64Index', 'InvalidIndexError', '_new_Index', '_ensure_index', '_get_na_value', '_get_combined_index', diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py index b87fb5dc84782..d0bf4edfbc5d2 100644 --- a/pandas/indexes/base.py +++ b/pandas/indexes/base.py @@ -27,6 +27,8 @@ is_object_dtype, is_categorical_dtype, is_bool_dtype, + is_signed_integer_dtype, + is_unsigned_integer_dtype, is_integer_dtype, is_float_dtype, is_datetime64_any_dtype, is_timedelta64_dtype, @@ -199,14 +201,25 @@ def __new__(cls, data=None, 
dtype=None, copy=False, name=None, data = np.array(data, copy=copy, dtype=dtype) elif inferred in ['floating', 'mixed-integer-float']: - # if we are actually all equal to integers + # If we are actually all equal to integers, # then coerce to integer - from .numeric import Int64Index, Float64Index + from .numeric import (Int64Index, UInt64Index, + Float64Index) try: - res = data.astype('i8') + res = data.astype('i8', copy=False) if (res == data).all(): return Int64Index(res, copy=copy, name=name) + except (OverflowError, TypeError, ValueError): + pass + + # Conversion to int64 failed (possibly due to + # overflow), so let's try now with uint64. + try: + res = data.astype('u8', copy=False) + if (res == data).all(): + return UInt64Index(res, copy=copy, + name=name) except (TypeError, ValueError): pass @@ -235,10 +248,13 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, IncompatibleFrequency) if isinstance(data, PeriodIndex): return PeriodIndex(data, copy=copy, name=name, **kwargs) - if issubclass(data.dtype.type, np.integer): + if is_signed_integer_dtype(data.dtype): from .numeric import Int64Index return Int64Index(data, copy=copy, dtype=dtype, name=name) - elif issubclass(data.dtype.type, np.floating): + elif is_unsigned_integer_dtype(data.dtype): + from .numeric import UInt64Index + return UInt64Index(data, copy=copy, dtype=dtype, name=name) + elif is_float_dtype(data.dtype): from .numeric import Float64Index return Float64Index(data, copy=copy, dtype=dtype, name=name) elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data): @@ -254,9 +270,13 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, if dtype is None: inferred = lib.infer_dtype(subarr) if inferred == 'integer': - from .numeric import Int64Index - return Int64Index(subarr.astype('i8'), copy=copy, - name=name) + from .numeric import Int64Index, UInt64Index + try: + return Int64Index(subarr.astype('i8'), copy=copy, + name=name) + except OverflowError: + return 
UInt64Index(subarr.astype('u8'), copy=copy, + name=name) elif inferred in ['floating', 'mixed-integer-float']: from .numeric import Float64Index return Float64Index(subarr, copy=copy, name=name) @@ -1253,6 +1273,40 @@ def is_int(v): return indexer + _index_shared_docs['_convert_arr_indexer'] = """ + Convert an array-like indexer to the appropriate dtype. + + Parameters + ---------- + keyarr : array-like + Indexer to convert. + + Returns + ------- + converted_keyarr : array-like + """ + + @Appender(_index_shared_docs['_convert_arr_indexer']) + def _convert_arr_indexer(self, keyarr): + return keyarr + + _index_shared_docs['_convert_index_indexer'] = """ + Convert an Index indexer to the appropriate dtype. + + Parameters + ---------- + keyarr : Index (or sub-class) + Indexer to convert. + + Returns + ------- + converted_keyarr : Index (or sub-class) + """ + + @Appender(_index_shared_docs['_convert_index_indexer']) + def _convert_index_indexer(self, keyarr): + return keyarr + def _convert_list_indexer(self, keyarr, kind=None): """ passed a key that is tuplesafe that is integer based @@ -3489,7 +3543,7 @@ def _validate_for_numeric_binop(self, other, op, opstr): raise ValueError("cannot evaluate a numeric op with " "unequal lengths") other = _values_from_object(other) - if other.dtype.kind not in ['f', 'i']: + if other.dtype.kind not in ['f', 'i', 'u']: raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") elif isinstance(other, (DateOffset, np.timedelta64, diff --git a/pandas/indexes/numeric.py b/pandas/indexes/numeric.py index c71abe202226e..0b9b337731d7f 100644 --- a/pandas/indexes/numeric.py +++ b/pandas/indexes/numeric.py @@ -8,7 +8,7 @@ is_float_dtype, is_object_dtype, is_integer_dtype, is_scalar) from pandas.types.missing import isnull -from pandas.core.common import _values_from_object +from pandas.core.common import _asarray_tuplesafe, _values_from_object from pandas import compat from pandas.indexes.base import Index, InvalidIndexError, 
_index_shared_docs @@ -73,6 +73,13 @@ def _assert_safe_casting(cls, data, subarr): """ pass + @property + def is_all_dates(self): + """ + Checks that all the labels are datetime objects + """ + return False + _num_index_shared_docs['class_descr'] = """ Immutable ndarray implementing an ordered, sliceable set. The basic object @@ -128,13 +135,6 @@ def asi8(self): # do not cache or you'll create a memory leak return self.values.view('i8') - @property - def is_all_dates(self): - """ - Checks that all the labels are datetime objects - """ - return False - @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): assert kind in ['ix', 'loc', 'getitem', 'iloc', None] @@ -154,7 +154,7 @@ def _assert_safe_casting(cls, data, subarr): """ Ensure incoming data can be represented as ints. """ - if not issubclass(data.dtype.type, np.integer): + if not issubclass(data.dtype.type, np.signedinteger): if not np.array_equal(data, subarr): raise TypeError('Unsafe NumPy casting, you must ' 'explicitly cast') @@ -162,6 +162,84 @@ def _assert_safe_casting(cls, data, subarr): Int64Index._add_numeric_methods() Int64Index._add_logical_methods() +_uint64_descr_args = dict( + klass='UInt64Index', + ltype='unsigned integer', + dtype='uint64', + extra='' +) + + +class UInt64Index(NumericIndex): + __doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args + + _typ = 'uint64index' + _arrmap = _algos.arrmap_uint64 + _left_indexer_unique = _join.left_join_indexer_unique_uint64 + _left_indexer = _join.left_join_indexer_uint64 + _inner_indexer = _join.inner_join_indexer_uint64 + _outer_indexer = _join.outer_join_indexer_uint64 + + _can_hold_na = False + _na_value = 0 + + _engine_type = _index.UInt64Engine + + _default_dtype = np.uint64 + + @property + def inferred_type(self): + return 'integer' + + @property + def asi8(self): + # do not cache or you'll create a memory leak + return self.values.view('u8') + + 
@Appender(_index_shared_docs['_convert_scalar_indexer']) + def _convert_scalar_indexer(self, key, kind=None): + assert kind in ['ix', 'loc', 'getitem', 'iloc', None] + + # don't coerce ilocs to integers + if kind != 'iloc': + key = self._maybe_cast_indexer(key) + return (super(UInt64Index, self) + ._convert_scalar_indexer(key, kind=kind)) + + @Appender(_index_shared_docs['_convert_arr_indexer']) + def _convert_arr_indexer(self, keyarr): + # Cast the indexer to uint64 if possible so + # that the values returned from indexing are + # also uint64. + if is_integer_dtype(keyarr): + return _asarray_tuplesafe(keyarr, dtype=np.uint64) + return keyarr + + @Appender(_index_shared_docs['_convert_index_indexer']) + def _convert_index_indexer(self, keyarr): + # Cast the indexer to uint64 if possible so + # that the values returned from indexing are + # also uint64. + if keyarr.is_integer(): + return keyarr.astype(np.uint64) + return keyarr + + def _wrap_joined_index(self, joined, other): + name = self.name if self.name == other.name else None + return UInt64Index(joined, name=name) + + @classmethod + def _assert_safe_casting(cls, data, subarr): + """ + Ensure incoming data can be represented as uints. 
+ """ + if not issubclass(data.dtype.type, np.unsignedinteger): + if not np.array_equal(data, subarr): + raise TypeError('Unsafe NumPy casting, you must ' + 'explicitly cast') + +UInt64Index._add_numeric_methods() +UInt64Index._add_logical_methods() _float64_descr_args = dict( klass='Float64Index', @@ -207,15 +285,6 @@ def astype(self, dtype, copy=True): @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): - """ - convert a scalar indexer - - Parameters - ---------- - key : label of the slice bound - kind : {'ix', 'loc', 'getitem'} or None - """ - assert kind in ['ix', 'loc', 'getitem', 'iloc', None] if kind == 'iloc': @@ -310,13 +379,6 @@ def get_loc(self, key, method=None, tolerance=None): return super(Float64Index, self).get_loc(key, method=method, tolerance=tolerance) - @property - def is_all_dates(self): - """ - Checks that all the labels are datetime objects - """ - return False - @cache_readonly def is_unique(self): return super(Float64Index, self).is_unique and self._nan_idxs.size < 2 diff --git a/pandas/src/algos_common_helper.pxi.in b/pandas/src/algos_common_helper.pxi.in index c1c190704b4c7..a579a5020f6e7 100644 --- a/pandas/src/algos_common_helper.pxi.in +++ b/pandas/src/algos_common_helper.pxi.in @@ -27,6 +27,7 @@ dtypes = [('float64', 'float64_t', 'np.float64', True, True), ('object', 'object', 'object', True, False), ('int32', 'int32_t', 'np.int32', False, True), ('int64', 'int64_t', 'np.int64', False, True), + ('uint64', 'uint64_t', 'np.uint64', False, True), ('bool', 'uint8_t', 'np.bool', False, True)] def get_dispatch(dtypes): diff --git a/pandas/src/index_class_helper.pxi.in b/pandas/src/index_class_helper.pxi.in index 315dd18009ad4..76c0deef7ebee 100644 --- a/pandas/src/index_class_helper.pxi.in +++ b/pandas/src/index_class_helper.pxi.in @@ -12,6 +12,7 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in # name, dtype, ctype dtypes = [('Float64', 'float64', 
'float64_t'), + ('UInt64', 'uint64', 'uint64_t'), ('Int64', 'int64', 'int64_t'), ('Object', 'object', 'object')] }} diff --git a/pandas/src/join_helper.pxi.in b/pandas/src/join_helper.pxi.in index 5b55ec2b1bf6d..feb8cfb76a7f0 100644 --- a/pandas/src/join_helper.pxi.in +++ b/pandas/src/join_helper.pxi.in @@ -15,7 +15,8 @@ dtypes = [('float64', 'float64_t', 'np.float64'), ('float32', 'float32_t', 'np.float32'), ('object', 'object', 'object'), ('int32', 'int32_t', 'np.int32'), - ('int64', 'int64_t', 'np.int64')] + ('int64', 'int64_t', 'np.int64'), + ('uint64', 'uint64_t', 'np.uint64')] def get_dispatch(dtypes): @@ -404,4 +405,4 @@ def outer_join_indexer_{{name}}(ndarray[{{c_type}}] left, return result, lindexer, rindexer -{{endfor}} \ No newline at end of file +{{endfor}} diff --git a/pandas/src/joins_func_helper.pxi.in b/pandas/src/joins_func_helper.pxi.in index 33926a23f7f41..68c376492f8f2 100644 --- a/pandas/src/joins_func_helper.pxi.in +++ b/pandas/src/joins_func_helper.pxi.in @@ -12,7 +12,8 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in {{py: # table_type, by_dtype -by_dtypes = [('PyObjectHashTable', 'object'), ('Int64HashTable', 'int64_t')] +by_dtypes = [('PyObjectHashTable', 'object'), ('Int64HashTable', 'int64_t'), + ('UInt64HashTable', 'uint64_t')] # on_dtype on_dtypes = ['uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index abe40f7be1d90..02d288bdf6ea8 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -2789,7 +2789,63 @@ def test_set_reset(self): result = df.reset_index() self.assertTrue(result['foo'].dtype, 'M8[ns, US/Eastern') - result = result.set_index('foo') + df = result.set_index('foo') + tm.assert_index_equal(df.index, idx) + + def test_transpose(self): + + result = self.df.T + expected = DataFrame(self.df.values.T) + expected.index = ['A', 'B'] + assert_frame_equal(result, expected) + + +class 
TestDataFrameIndexingUInt64(tm.TestCase, TestData): + + _multiprocess_can_split_ = True + + def setUp(self): + self.ir = Index(np.arange(3), dtype=np.uint64) + self.idx = Index([2**63, 2**63 + 5, 2**63 + 10], name='foo') + + self.df = DataFrame({'A': self.idx, 'B': self.ir}) + + def test_setitem(self): + + df = self.df + idx = self.idx + + # setitem + df['C'] = idx + assert_series_equal(df['C'], Series(idx, name='C')) + + df['D'] = 'foo' + df['D'] = idx + assert_series_equal(df['D'], Series(idx, name='D')) + del df['D'] + + # With NaN: because uint64 has no NaN element, + # the column should be cast to object. + df2 = df.copy() + df2.iloc[1, 1] = pd.NaT + df2.iloc[1, 2] = pd.NaT + result = df2['B'] + assert_series_equal(notnull(result), Series( + [True, False, True], name='B')) + assert_series_equal(df2.dtypes, Series([np.dtype('uint64'), + np.dtype('O'), np.dtype('O')], + index=['A', 'B', 'C'])) + + def test_set_reset(self): + + idx = self.idx + + # set/reset + df = DataFrame({'A': [0, 1, 2]}, index=idx) + result = df.reset_index() + self.assertEqual(result['foo'].dtype, np.dtype('uint64')) + + df = result.set_index('foo') tm.assert_index_equal(df.index, idx) def test_transpose(self): diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 1b373baf9b3c1..63e9fe580d73d 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -5,8 +5,8 @@ import numpy as np -from pandas import (Series, Index, Float64Index, Int64Index, RangeIndex, - MultiIndex, CategoricalIndex, DatetimeIndex, +from pandas import (Series, Index, Float64Index, Int64Index, UInt64Index, + RangeIndex, MultiIndex, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, notnull) from pandas.types.common import needs_i8_conversion from pandas.util.testing import assertRaisesRegexp @@ -470,10 +470,11 @@ def test_where(self): expected = i tm.assert_index_equal(result, expected) - i2 = i.copy() - i2 = pd.Index([np.nan, np.nan] + i[2:].tolist()) - 
result = i.where(notnull(i2)) - expected = i2 + _nan = i._na_value + cond = [False] + [True] * len(i[1:]) + expected = pd.Index([_nan] + i[1:].tolist(), dtype=i.dtype) + + result = i.where(cond) tm.assert_index_equal(result, expected) def test_setops_errorcases(self): @@ -660,6 +661,12 @@ def test_equals(self): self.assertFalse(idx.equals(list(idx))) self.assertFalse(idx.equals(np.array(idx))) + # Cannot pass in non-int64 dtype to RangeIndex + if not isinstance(idx, RangeIndex): + same_values = Index(idx, dtype=object) + self.assertTrue(idx.equals(same_values)) + self.assertTrue(same_values.equals(idx)) + if idx.nlevels == 1: # do not test MultiIndex self.assertFalse(idx.equals(pd.Series(idx))) @@ -744,7 +751,7 @@ def test_numpy_ufuncs(self): with tm.assertRaises(Exception): with np.errstate(all='ignore'): func(idx) - elif isinstance(idx, (Float64Index, Int64Index)): + elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)): # coerces to float (e.g. np.sin) with np.errstate(all='ignore'): result = func(idx) @@ -765,7 +772,7 @@ def test_numpy_ufuncs(self): # raise TypeError or ValueError (PeriodIndex) with tm.assertRaises(Exception): func(idx) - elif isinstance(idx, (Float64Index, Int64Index)): + elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)): # results in bool array result = func(idx) exp = func(idx.values) @@ -798,7 +805,7 @@ def test_hasnans_isnans(self): continue elif isinstance(index, pd.tseries.base.DatetimeIndexOpsMixin): values[1] = pd.tslib.iNaT - elif isinstance(index, Int64Index): + elif isinstance(index, (Int64Index, UInt64Index)): continue else: values[1] = np.nan @@ -838,7 +845,7 @@ def test_fillna(self): if isinstance(index, pd.tseries.base.DatetimeIndexOpsMixin): values[1] = pd.tslib.iNaT - elif isinstance(index, Int64Index): + elif isinstance(index, (Int64Index, UInt64Index)): continue else: values[1] = np.nan diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 0e6773fd83404..a0f2a090c9a06 
100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -4,17 +4,18 @@ import pandas.util.testing as tm from pandas.indexes.api import Index, MultiIndex -from .common import Base +from pandas.tests.indexes.common import Base from pandas.compat import (range, lrange, lzip, u, - zip, PY3, PY36) + text_type, zip, PY3, PY36) import operator import os +import nose import numpy as np from pandas import (period_range, date_range, Series, - Float64Index, Int64Index, + DataFrame, Float64Index, Int64Index, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex) from pandas.core.index import _get_combined_index @@ -40,6 +41,7 @@ def setUp(self): periodIndex=tm.makePeriodIndex(100), tdIndex=tm.makeTimedeltaIndex(100), intIndex=tm.makeIntIndex(100), + uintIndex=tm.makeUIntIndex(100), rangeIndex=tm.makeIntIndex(100), floatIndex=tm.makeFloatIndex(100), boolIndex=Index([True, False]), @@ -449,7 +451,7 @@ def test_delete(self): self.assertEqual(result.name, expected.name) with tm.assertRaises((IndexError, ValueError)): - # either depeidnig on numpy version + # either depending on numpy version result = idx.delete(5) def test_identical(self): @@ -2020,3 +2022,64 @@ def test_repeat(self): with tm.assert_produces_warning(FutureWarning): result = idx.repeat(n=repeats) tm.assert_index_equal(result, expected) + + def test_is_monotonic_na(self): + examples = [pd.Index([np.nan]), + pd.Index([np.nan, 1]), + pd.Index([1, 2, np.nan]), + pd.Index(['a', 'b', np.nan]), + pd.to_datetime(['NaT']), + pd.to_datetime(['NaT', '2000-01-01']), + pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']), + pd.to_timedelta(['1 day', 'NaT']), ] + for index in examples: + self.assertFalse(index.is_monotonic_increasing) + self.assertFalse(index.is_monotonic_decreasing) + + def test_repr_summary(self): + with cf.option_context('display.max_seq_items', 10): + r = repr(pd.Index(np.arange(1000))) + self.assertTrue(len(r) < 200) + self.assertTrue("..." 
in r) + + def test_int_name_format(self): + index = Index(['a', 'b', 'c'], name=0) + s = Series(lrange(3), index) + df = DataFrame(lrange(3), index=index) + repr(s) + repr(df) + + def test_print_unicode_columns(self): + df = pd.DataFrame({u("\u05d0"): [1, 2, 3], + "\u05d1": [4, 5, 6], + "c": [7, 8, 9]}) + repr(df.columns) # should not raise UnicodeDecodeError + + def test_unicode_string_with_unicode(self): + idx = Index(lrange(1000)) + + if PY3: + str(idx) + else: + text_type(idx) + + def test_bytestring_with_unicode(self): + idx = Index(lrange(1000)) + if PY3: + bytes(idx) + else: + str(idx) + + def test_intersect_str_dates(self): + dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] + + i1 = Index(dt_dates, dtype=object) + i2 = Index(['aa'], dtype=object) + res = i2.intersection(i1) + + self.assertEqual(len(res), 0) + + +if __name__ == '__main__': + nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], + exit=False) diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index f7f072d5b5d2a..044d3477271ad 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -1,22 +1,20 @@ # -*- coding: utf-8 -*- from datetime import datetime -from pandas import compat -from pandas.compat import range, lrange, u, PY3 +from pandas.compat import range, PY3 +import nose import numpy as np -from pandas import (date_range, Series, DataFrame, - Index, Float64Index, Int64Index, RangeIndex) -from pandas.util.testing import assertRaisesRegexp +from pandas import (date_range, Series, Index, Float64Index, + Int64Index, UInt64Index, RangeIndex) import pandas.util.testing as tm -import pandas.core.config as cf import pandas as pd from pandas.lib import Timestamp -from .common import Base +from pandas.tests.indexes.common import Base def full_like(array, value): @@ -64,10 +62,11 @@ def test_numeric_compat(self): result = idx * np.array(5, dtype='int64') tm.assert_index_equal(result, idx * 5) - 
result = idx * np.arange(5, dtype='int64') + arr_dtype = 'uint64' if isinstance(idx, UInt64Index) else 'int64' + result = idx * np.arange(5, dtype=arr_dtype) tm.assert_index_equal(result, didx) - result = idx * Series(np.arange(5, dtype='int64')) + result = idx * Series(np.arange(5, dtype=arr_dtype)) tm.assert_index_equal(result, didx) result = idx * Series(np.arange(5, dtype='float64') + 0.1) @@ -448,7 +447,183 @@ def test_take_fill_value(self): idx.take(np.array([1, -5])) -class TestInt64Index(Numeric, tm.TestCase): +class NumericInt(Numeric): + + def test_view(self): + super(NumericInt, self).test_view() + + i = self._holder([], name='Foo') + i_view = i.view() + self.assertEqual(i_view.name, 'Foo') + + i_view = i.view(self._dtype) + tm.assert_index_equal(i, self._holder(i_view, name='Foo')) + + i_view = i.view(self._holder) + tm.assert_index_equal(i, self._holder(i_view, name='Foo')) + + def test_is_monotonic(self): + self.assertTrue(self.index.is_monotonic) + self.assertTrue(self.index.is_monotonic_increasing) + self.assertFalse(self.index.is_monotonic_decreasing) + + index = self._holder([4, 3, 2, 1]) + self.assertFalse(index.is_monotonic) + self.assertTrue(index.is_monotonic_decreasing) + + index = self._holder([1]) + self.assertTrue(index.is_monotonic) + self.assertTrue(index.is_monotonic_increasing) + self.assertTrue(index.is_monotonic_decreasing) + + def test_logical_compat(self): + idx = self.create_index() + self.assertEqual(idx.all(), idx.values.all()) + self.assertEqual(idx.any(), idx.values.any()) + + def test_identical(self): + i = Index(self.index.copy()) + self.assertTrue(i.identical(self.index)) + + same_values_different_type = Index(i, dtype=object) + self.assertFalse(i.identical(same_values_different_type)) + + i = self.index.copy(dtype=object) + i = i.rename('foo') + same_values = Index(i, dtype=object) + self.assertTrue(same_values.identical(i)) + + self.assertFalse(i.identical(self.index)) + self.assertTrue(Index(same_values, name='foo', 
dtype=object).identical( + i)) + + self.assertFalse(self.index.copy(dtype=object) + .identical(self.index.copy(dtype=self._dtype))) + + def test_join_non_unique(self): + left = Index([4, 4, 3, 3]) + + joined, lidx, ridx = left.join(left, return_indexers=True) + + exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4]) + self.assert_index_equal(joined, exp_joined) + + exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(lidx, exp_lidx) + + exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(ridx, exp_ridx) + + def test_join_self(self): + kinds = 'outer', 'inner', 'left', 'right' + for kind in kinds: + joined = self.index.join(self.index, how=kind) + self.assertIs(self.index, joined) + + def test_union_noncomparable(self): + from datetime import datetime, timedelta + # corner case, non-Int64Index + now = datetime.now() + other = Index([now + timedelta(i) for i in range(4)], dtype=object) + result = self.index.union(other) + expected = Index(np.concatenate((self.index, other))) + tm.assert_index_equal(result, expected) + + result = other.union(self.index) + expected = Index(np.concatenate((other, self.index))) + tm.assert_index_equal(result, expected) + + def test_cant_or_shouldnt_cast(self): + # can't + data = ['foo', 'bar', 'baz'] + self.assertRaises(TypeError, self._holder, data) + + # shouldn't + data = ['0', '1', '2'] + self.assertRaises(TypeError, self._holder, data) + + def test_view_index(self): + self.index.view(Index) + + def test_prevent_casting(self): + result = self.index.astype('O') + self.assertEqual(result.dtype, np.object_) + + def test_take_preserve_name(self): + index = self._holder([1, 2, 3, 4], name='foo') + taken = index.take([3, 0, 1]) + self.assertEqual(index.name, taken.name) + + def test_take_fill_value(self): + # see gh-12631 + idx = self._holder([1, 2, 3], name='xxx') + result = idx.take(np.array([1, 0, -1])) + expected = self._holder([2, 1, 3], name='xxx') + 
tm.assert_index_equal(result, expected) + + name = self._holder.__name__ + msg = ("Unable to fill values because " + "{name} cannot contain NA").format(name=name) + + # fill_value=True + with tm.assertRaisesRegexp(ValueError, msg): + idx.take(np.array([1, 0, -1]), fill_value=True) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, + fill_value=True) + expected = self._holder([2, 1, 3], name='xxx') + tm.assert_index_equal(result, expected) + + with tm.assertRaisesRegexp(ValueError, msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with tm.assertRaisesRegexp(ValueError, msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + with tm.assertRaises(IndexError): + idx.take(np.array([1, -5])) + + def test_slice_keep_name(self): + idx = self._holder([1, 2], name='asdf') + self.assertEqual(idx.name, idx[1:].name) + + def test_ufunc_coercions(self): + idx = self._holder([1, 2, 3, 4, 5], name='x') + + result = np.sqrt(idx) + tm.assertIsInstance(result, Float64Index) + exp = Float64Index(np.sqrt(np.array([1, 2, 3, 4, 5])), name='x') + tm.assert_index_equal(result, exp) + + result = np.divide(idx, 2.) + tm.assertIsInstance(result, Float64Index) + exp = Float64Index([0.5, 1., 1.5, 2., 2.5], name='x') + tm.assert_index_equal(result, exp) + + # _evaluate_numeric_binop + result = idx + 2. + tm.assertIsInstance(result, Float64Index) + exp = Float64Index([3., 4., 5., 6., 7.], name='x') + tm.assert_index_equal(result, exp) + + result = idx - 2. + tm.assertIsInstance(result, Float64Index) + exp = Float64Index([-1., 0., 1., 2., 3.], name='x') + tm.assert_index_equal(result, exp) + + result = idx * 1. + tm.assertIsInstance(result, Float64Index) + exp = Float64Index([1., 2., 3., 4., 5.], name='x') + tm.assert_index_equal(result, exp) + + result = idx / 2. 
+ tm.assertIsInstance(result, Float64Index) + exp = Float64Index([0.5, 1., 1.5, 2., 2.5], name='x') + tm.assert_index_equal(result, exp) + + +class TestInt64Index(NumericInt, tm.TestCase): + _dtype = 'int64' _holder = Int64Index _multiprocess_can_split_ = True @@ -459,12 +634,6 @@ def setUp(self): def create_index(self): return Int64Index(np.arange(5, dtype='int64')) - def test_too_many_names(self): - def testit(): - self.index.names = ["roger", "harold"] - - assertRaisesRegexp(ValueError, "^Length", testit) - def test_constructor(self): # pass list, coerce fine index = Int64Index([-5, 0, 1, 2]) @@ -511,24 +680,6 @@ def test_constructor_corner(self): with tm.assertRaisesRegexp(TypeError, 'casting'): Int64Index(arr_with_floats) - def test_copy(self): - i = Int64Index([], name='Foo') - i_copy = i.copy() - self.assertEqual(i_copy.name, 'Foo') - - def test_view(self): - super(TestInt64Index, self).test_view() - - i = Int64Index([], name='Foo') - i_view = i.view() - self.assertEqual(i_view.name, 'Foo') - - i_view = i.view('i8') - tm.assert_index_equal(i, Int64Index(i_view, name='Foo')) - - i_view = i.view(Int64Index) - tm.assert_index_equal(i, Int64Index(i_view, name='Foo')) - def test_coerce_list(self): # coerce things arr = Index([1, 2, 3, 4]) @@ -538,119 +689,33 @@ def test_coerce_list(self): arr = Index([1, 2, 3, 4], dtype=object) tm.assertIsInstance(arr, Index) - def test_dtype(self): - self.assertEqual(self.index.dtype, np.int64) - - def test_is_monotonic(self): - self.assertTrue(self.index.is_monotonic) - self.assertTrue(self.index.is_monotonic_increasing) - self.assertFalse(self.index.is_monotonic_decreasing) - - index = Int64Index([4, 3, 2, 1]) - self.assertFalse(index.is_monotonic) - self.assertTrue(index.is_monotonic_decreasing) - - index = Int64Index([1]) - self.assertTrue(index.is_monotonic) - self.assertTrue(index.is_monotonic_increasing) - self.assertTrue(index.is_monotonic_decreasing) - - def test_is_monotonic_na(self): - examples = [Index([np.nan]), - 
Index([np.nan, 1]), - Index([1, 2, np.nan]), - Index(['a', 'b', np.nan]), - pd.to_datetime(['NaT']), - pd.to_datetime(['NaT', '2000-01-01']), - pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']), - pd.to_timedelta(['1 day', 'NaT']), ] - for index in examples: - self.assertFalse(index.is_monotonic_increasing) - self.assertFalse(index.is_monotonic_decreasing) - - def test_equals(self): - same_values = Index(self.index, dtype=object) - self.assertTrue(self.index.equals(same_values)) - self.assertTrue(same_values.equals(self.index)) - - def test_logical_compat(self): - idx = self.create_index() - self.assertEqual(idx.all(), idx.values.all()) - self.assertEqual(idx.any(), idx.values.any()) - - def test_identical(self): - i = Index(self.index.copy()) - self.assertTrue(i.identical(self.index)) - - same_values_different_type = Index(i, dtype=object) - self.assertFalse(i.identical(same_values_different_type)) - - i = self.index.copy(dtype=object) - i = i.rename('foo') - same_values = Index(i, dtype=object) - self.assertTrue(same_values.identical(i)) - - self.assertFalse(i.identical(self.index)) - self.assertTrue(Index(same_values, name='foo', dtype=object).identical( - i)) - - self.assertFalse(self.index.copy(dtype=object) - .identical(self.index.copy(dtype='int64'))) - def test_get_indexer(self): target = Int64Index(np.arange(10)) indexer = self.index.get_indexer(target) expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) - def test_get_indexer_pad(self): target = Int64Index(np.arange(10)) indexer = self.index.get_indexer(target, method='pad') expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) - def test_get_indexer_backfill(self): target = Int64Index(np.arange(10)) indexer = self.index.get_indexer(target, method='backfill') expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) - def 
test_join_outer(self): - other = Int64Index([7, 12, 25, 1, 2, 5]) - other_mono = Int64Index([1, 2, 5, 7, 12, 25]) - - # not monotonic - # guarantee of sortedness - res, lidx, ridx = self.index.join(other, how='outer', - return_indexers=True) - noidx_res = self.index.join(other, how='outer') - self.assert_index_equal(res, noidx_res) - - eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25]) - elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], - dtype=np.intp) - eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2], - dtype=np.intp) - - tm.assertIsInstance(res, Int64Index) - self.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - # monotonic - res, lidx, ridx = self.index.join(other_mono, how='outer', - return_indexers=True) - noidx_res = self.index.join(other_mono, how='outer') - self.assert_index_equal(res, noidx_res) + def test_intersection(self): + other = Index([1, 2, 3, 4, 5]) + result = self.index.intersection(other) + expected = Index(np.sort(np.intersect1d(self.index.values, + other.values))) + tm.assert_index_equal(result, expected) - elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], - dtype=np.intp) - eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5], - dtype=np.intp) - tm.assertIsInstance(res, Int64Index) - self.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) + result = other.intersection(self.index) + expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values, + other.values)))) + tm.assert_index_equal(result, expected) def test_join_inner(self): other = Int64Index([7, 12, 25, 1, 2, 5]) @@ -789,28 +854,92 @@ def test_join_non_int_index(self): right2 = other.join(self.index, how='right') self.assert_index_equal(right2, self.index.astype(object)) - def test_join_non_unique(self): - left = Index([4, 4, 3, 3]) + def test_join_outer(self): + 
other = Int64Index([7, 12, 25, 1, 2, 5]) + other_mono = Int64Index([1, 2, 5, 7, 12, 25]) - joined, lidx, ridx = left.join(left, return_indexers=True) + # not monotonic + # guarantee of sortedness + res, lidx, ridx = self.index.join(other, how='outer', + return_indexers=True) + noidx_res = self.index.join(other, how='outer') + self.assert_index_equal(res, noidx_res) - exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4]) - self.assert_index_equal(joined, exp_joined) + eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25]) + elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], + dtype=np.intp) + eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2], + dtype=np.intp) - exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.intp) - tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assertIsInstance(res, Int64Index) + self.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) - exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.intp) - tm.assert_numpy_array_equal(ridx, exp_ridx) + # monotonic + res, lidx, ridx = self.index.join(other_mono, how='outer', + return_indexers=True) + noidx_res = self.index.join(other_mono, how='outer') + self.assert_index_equal(res, noidx_res) - def test_join_self(self): - kinds = 'outer', 'inner', 'left', 'right' - for kind in kinds: - joined = self.index.join(self.index, how=kind) - self.assertIs(self.index, joined) + elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], + dtype=np.intp) + eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5], + dtype=np.intp) + tm.assertIsInstance(res, Int64Index) + self.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + +class TestUInt64Index(NumericInt, tm.TestCase): + + _dtype = 'uint64' + _holder = UInt64Index + _multiprocess_can_split_ = True + + def setUp(self): + self.indices = dict(index=UInt64Index([2**63, 2**63 
+ 10, 2**63 + 15, + 2**63 + 20, 2**63 + 25])) + self.setup_indices() + + def create_index(self): + return UInt64Index(np.arange(5, dtype='uint64')) + + def test_constructor(self): + idx = UInt64Index([1, 2, 3]) + res = Index([1, 2, 3], dtype=np.uint64) + tm.assert_index_equal(res, idx) + + idx = UInt64Index([1, 2**63]) + res = Index([1, 2**63], dtype=np.uint64) + tm.assert_index_equal(res, idx) + + idx = UInt64Index([1, 2**63]) + res = Index([1, 2**63]) + tm.assert_index_equal(res, idx) + + def test_get_indexer(self): + target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63) + indexer = self.index.get_indexer(target) + expected = np.array([0, -1, 1, 2, 3, 4, + -1, -1, -1, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63) + indexer = self.index.get_indexer(target, method='pad') + expected = np.array([0, 0, 1, 2, 3, 4, + 4, 4, 4, 4], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63) + indexer = self.index.get_indexer(target, method='backfill') + expected = np.array([0, 1, 1, 2, 3, 4, + -1, -1, -1, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) def test_intersection(self): - other = Index([1, 2, 3, 4, 5]) + other = Index([2**63, 2**63 + 5, 2**63 + 10, 2**63 + 15, 2**63 + 20]) result = self.index.intersection(other) expected = Index(np.sort(np.intersect1d(self.index.values, other.values))) @@ -821,147 +950,198 @@ def test_intersection(self): other.values)))) tm.assert_index_equal(result, expected) - def test_intersect_str_dates(self): - dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] + def test_join_inner(self): + other = UInt64Index(2**63 + np.array( + [7, 12, 25, 1, 2, 10], dtype='uint64')) + other_mono = UInt64Index(2**63 + np.array( + [1, 2, 7, 10, 12, 25], dtype='uint64')) - i1 = Index(dt_dates, dtype=object) - i2 = Index(['aa'], dtype=object) - res = 
i2.intersection(i1) + # not monotonic + res, lidx, ridx = self.index.join(other, how='inner', + return_indexers=True) - self.assertEqual(len(res), 0) + # no guarantee of sortedness, so sort for comparison purposes + ind = res.argsort() + res = res.take(ind) + lidx = lidx.take(ind) + ridx = ridx.take(ind) - def test_union_noncomparable(self): - from datetime import datetime, timedelta - # corner case, non-Int64Index - now = datetime.now() - other = Index([now + timedelta(i) for i in range(4)], dtype=object) - result = self.index.union(other) - expected = Index(np.concatenate((self.index, other))) - tm.assert_index_equal(result, expected) + eres = UInt64Index(2**63 + np.array([10, 25], dtype='uint64')) + elidx = np.array([1, 4], dtype=np.intp) + eridx = np.array([5, 2], dtype=np.intp) - result = other.union(self.index) - expected = Index(np.concatenate((other, self.index))) - tm.assert_index_equal(result, expected) + tm.assertIsInstance(res, UInt64Index) + self.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) - def test_cant_or_shouldnt_cast(self): - # can't - data = ['foo', 'bar', 'baz'] - self.assertRaises(TypeError, Int64Index, data) + # monotonic + res, lidx, ridx = self.index.join(other_mono, how='inner', + return_indexers=True) - # shouldn't - data = ['0', '1', '2'] - self.assertRaises(TypeError, Int64Index, data) + res2 = self.index.intersection(other_mono) + self.assert_index_equal(res, res2) - def test_view_Index(self): - self.index.view(Index) + elidx = np.array([1, 4], dtype=np.intp) + eridx = np.array([3, 5], dtype=np.intp) - def test_prevent_casting(self): - result = self.index.astype('O') - self.assertEqual(result.dtype, np.object_) + tm.assertIsInstance(res, UInt64Index) + self.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) - def test_take_preserve_name(self): - index = Int64Index([1, 2, 3, 4], name='foo') - taken = 
index.take([3, 0, 1]) - self.assertEqual(index.name, taken.name) + def test_join_left(self): + other = UInt64Index(2**63 + np.array( + [7, 12, 25, 1, 2, 10], dtype='uint64')) + other_mono = UInt64Index(2**63 + np.array( + [1, 2, 7, 10, 12, 25], dtype='uint64')) - def test_take_fill_value(self): - # GH 12631 - idx = pd.Int64Index([1, 2, 3], name='xxx') - result = idx.take(np.array([1, 0, -1])) - expected = pd.Int64Index([2, 1, 3], name='xxx') - tm.assert_index_equal(result, expected) + # not monotonic + res, lidx, ridx = self.index.join(other, how='left', + return_indexers=True) + eres = self.index + eridx = np.array([-1, 5, -1, -1, 2], dtype=np.intp) - # fill_value - msg = "Unable to fill values because Int64Index cannot contain NA" - with tm.assertRaisesRegexp(ValueError, msg): - idx.take(np.array([1, 0, -1]), fill_value=True) + tm.assertIsInstance(res, UInt64Index) + self.assert_index_equal(res, eres) + self.assertIsNone(lidx) + tm.assert_numpy_array_equal(ridx, eridx) - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, - fill_value=True) - expected = pd.Int64Index([2, 1, 3], name='xxx') - tm.assert_index_equal(result, expected) + # monotonic + res, lidx, ridx = self.index.join(other_mono, how='left', + return_indexers=True) + eridx = np.array([-1, 3, -1, -1, 5], dtype=np.intp) - msg = "Unable to fill values because Int64Index cannot contain NA" - with tm.assertRaisesRegexp(ValueError, msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with tm.assertRaisesRegexp(ValueError, msg): - idx.take(np.array([1, 0, -5]), fill_value=True) + tm.assertIsInstance(res, UInt64Index) + self.assert_index_equal(res, eres) + self.assertIsNone(lidx) + tm.assert_numpy_array_equal(ridx, eridx) - with tm.assertRaises(IndexError): - idx.take(np.array([1, -5])) + # non-unique + idx = UInt64Index(2**63 + np.array([1, 1, 2, 5], dtype='uint64')) + idx2 = UInt64Index(2**63 + np.array([1, 2, 5, 7, 9], dtype='uint64')) + res, lidx, ridx = idx2.join(idx, 
how='left', return_indexers=True) - def test_int_name_format(self): - index = Index(['a', 'b', 'c'], name=0) - s = Series(lrange(3), index) - df = DataFrame(lrange(3), index=index) - repr(s) - repr(df) - - def test_print_unicode_columns(self): - df = pd.DataFrame({u("\u05d0"): [1, 2, 3], - "\u05d1": [4, 5, 6], - "c": [7, 8, 9]}) - repr(df.columns) # should not raise UnicodeDecodeError - - def test_repr_summary(self): - with cf.option_context('display.max_seq_items', 10): - r = repr(pd.Index(np.arange(1000))) - self.assertTrue(len(r) < 200) - self.assertTrue("..." in r) + # 1 is in idx2, so it should be x2 + eres = UInt64Index(2**63 + np.array( + [1, 1, 2, 5, 7, 9], dtype='uint64')) + eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) - def test_repr_roundtrip(self): - tm.assert_index_equal(eval(repr(self.index)), self.index) + self.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) - def test_unicode_string_with_unicode(self): - idx = Index(lrange(1000)) + def test_join_right(self): + other = UInt64Index(2**63 + np.array( + [7, 12, 25, 1, 2, 10], dtype='uint64')) + other_mono = UInt64Index(2**63 + np.array( + [1, 2, 7, 10, 12, 25], dtype='uint64')) - if PY3: - str(idx) - else: - compat.text_type(idx) + # not monotonic + res, lidx, ridx = self.index.join(other, how='right', + return_indexers=True) + eres = other + elidx = np.array([-1, -1, 4, -1, -1, 1], dtype=np.intp) - def test_bytestring_with_unicode(self): - idx = Index(lrange(1000)) - if PY3: - bytes(idx) - else: - str(idx) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assertIsInstance(other, UInt64Index) + self.assert_index_equal(res, eres) + self.assertIsNone(ridx) - def test_slice_keep_name(self): - idx = Int64Index([1, 2], name='asdf') - self.assertEqual(idx.name, idx[1:].name) + # monotonic + res, lidx, ridx = self.index.join(other_mono, how='right', + return_indexers=True) + eres = 
other_mono + elidx = np.array([-1, -1, -1, 1, -1, 4], dtype=np.intp) - def test_ufunc_coercions(self): - idx = Int64Index([1, 2, 3, 4, 5], name='x') + tm.assertIsInstance(other, UInt64Index) + tm.assert_numpy_array_equal(lidx, elidx) + self.assert_index_equal(res, eres) + self.assertIsNone(ridx) - result = np.sqrt(idx) - tm.assertIsInstance(result, Float64Index) - exp = Float64Index(np.sqrt(np.array([1, 2, 3, 4, 5])), name='x') - tm.assert_index_equal(result, exp) + # non-unique + idx = UInt64Index(2**63 + np.array([1, 1, 2, 5], dtype='uint64')) + idx2 = UInt64Index(2**63 + np.array([1, 2, 5, 7, 9], dtype='uint64')) + res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True) - result = np.divide(idx, 2.) - tm.assertIsInstance(result, Float64Index) - exp = Float64Index([0.5, 1., 1.5, 2., 2.5], name='x') - tm.assert_index_equal(result, exp) + # 1 is in idx2, so it should be x2 + eres = UInt64Index(2**63 + np.array( + [1, 1, 2, 5, 7, 9], dtype='uint64')) + elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) - # _evaluate_numeric_binop - result = idx + 2. - tm.assertIsInstance(result, Float64Index) - exp = Float64Index([3., 4., 5., 6., 7.], name='x') - tm.assert_index_equal(result, exp) + self.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) - result = idx - 2. - tm.assertIsInstance(result, Float64Index) - exp = Float64Index([-1., 0., 1., 2., 3.], name='x') - tm.assert_index_equal(result, exp) + def test_join_non_int_index(self): + other = Index(2**63 + np.array( + [1, 5, 7, 10, 20], dtype='uint64'), dtype=object) - result = idx * 1. 
- tm.assertIsInstance(result, Float64Index) - exp = Float64Index([1., 2., 3., 4., 5.], name='x') - tm.assert_index_equal(result, exp) + outer = self.index.join(other, how='outer') + outer2 = other.join(self.index, how='outer') + expected = Index(2**63 + np.array( + [0, 1, 5, 7, 10, 15, 20, 25], dtype='uint64')) + self.assert_index_equal(outer, outer2) + self.assert_index_equal(outer, expected) - result = idx / 2. - tm.assertIsInstance(result, Float64Index) - exp = Float64Index([0.5, 1., 1.5, 2., 2.5], name='x') - tm.assert_index_equal(result, exp) + inner = self.index.join(other, how='inner') + inner2 = other.join(self.index, how='inner') + expected = Index(2**63 + np.array([10, 20], dtype='uint64')) + self.assert_index_equal(inner, inner2) + self.assert_index_equal(inner, expected) + + left = self.index.join(other, how='left') + self.assert_index_equal(left, self.index.astype(object)) + + left2 = other.join(self.index, how='left') + self.assert_index_equal(left2, other) + + right = self.index.join(other, how='right') + self.assert_index_equal(right, other) + + right2 = other.join(self.index, how='right') + self.assert_index_equal(right2, self.index.astype(object)) + + def test_join_outer(self): + other = UInt64Index(2**63 + np.array( + [7, 12, 25, 1, 2, 10], dtype='uint64')) + other_mono = UInt64Index(2**63 + np.array( + [1, 2, 7, 10, 12, 25], dtype='uint64')) + + # not monotonic + # guarantee of sortedness + res, lidx, ridx = self.index.join(other, how='outer', + return_indexers=True) + noidx_res = self.index.join(other, how='outer') + self.assert_index_equal(res, noidx_res) + + eres = UInt64Index(2**63 + np.array( + [0, 1, 2, 7, 10, 12, 15, 20, 25], dtype='uint64')) + elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) + eridx = np.array([-1, 3, 4, 0, 5, 1, -1, -1, 2], dtype=np.intp) + + tm.assertIsInstance(res, UInt64Index) + self.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + 
# monotonic + res, lidx, ridx = self.index.join(other_mono, how='outer', + return_indexers=True) + noidx_res = self.index.join(other_mono, how='outer') + self.assert_index_equal(res, noidx_res) + + elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) + eridx = np.array([-1, 0, 1, 2, 3, 4, -1, -1, 5], dtype=np.intp) + + tm.assertIsInstance(res, UInt64Index) + self.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + +if __name__ == '__main__': + nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], + exit=False) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 6fc24e41ee914..a50027f1d0343 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -20,7 +20,7 @@ from pandas import option_context from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice from pandas.core.api import (DataFrame, Index, Series, Panel, isnull, - MultiIndex, Timestamp, Timedelta) + MultiIndex, Timestamp, Timedelta, UInt64Index) from pandas.formats.printing import pprint_thing from pandas import concat from pandas.core.common import PerformanceWarning, UnsortedIndexError @@ -100,7 +100,8 @@ class TestIndexing(tm.TestCase): _multiprocess_can_split_ = True _objs = set(['series', 'frame', 'panel']) - _typs = set(['ints', 'labels', 'mixed', 'ts', 'floats', 'empty', 'ts_rev']) + _typs = set(['ints', 'uints', 'labels', 'mixed', + 'ts', 'floats', 'empty', 'ts_rev']) def setUp(self): @@ -116,6 +117,16 @@ def setUp(self): major_axis=lrange(0, 12, 3), minor_axis=lrange(0, 16, 4)) + self.series_uints = Series(np.random.rand(4), + index=UInt64Index(lrange(0, 8, 2))) + self.frame_uints = DataFrame(np.random.randn(4, 4), + index=UInt64Index(lrange(0, 8, 2)), + columns=UInt64Index(lrange(0, 12, 3))) + self.panel_uints = Panel(np.random.rand(4, 4, 4), + items=UInt64Index(lrange(0, 8, 2)), + 
major_axis=UInt64Index(lrange(0, 12, 3)), + minor_axis=UInt64Index(lrange(0, 16, 4))) + self.series_labels = Series(np.random.randn(4), index=list('abcd')) self.frame_labels = DataFrame(np.random.randn(4, 4), index=list('abcd'), columns=list('ABCD')) @@ -197,10 +208,6 @@ def _print(result, error=None): pprint_thing(v) try: - # if (name == 'bool' and t == 'empty' and o == 'series' and - # method1 == 'loc'): - # import pdb; pdb.set_trace() - rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a)) try: @@ -210,6 +217,8 @@ def _print(result, error=None): _print(result) return + detail = None + try: if is_scalar(rs) and is_scalar(xp): self.assertEqual(rs, xp) @@ -220,7 +229,8 @@ def _print(result, error=None): elif xp.ndim == 3: tm.assert_panel_equal(rs, xp) result = 'ok' - except (AssertionError): + except AssertionError as e: + detail = str(e) result = 'fail' # reverse the checks @@ -228,10 +238,9 @@ def _print(result, error=None): if result == 'fail': result = 'ok (fail)' - if not result.startswith('ok'): - raise AssertionError(_print(result)) - _print(result) + if not result.startswith('ok'): + raise AssertionError(detail) except AssertionError: raise @@ -309,16 +318,17 @@ def _check(f, func, values=False): d = getattr(self, o) # iat - _check(d['ints'], 'iat', values=True) + for f in [d['ints'], d['uints']]: + _check(f, 'iat', values=True) + for f in [d['labels'], d['ts'], d['floats']]: if f is not None: self.assertRaises(ValueError, self.check_values, f, 'iat') # at - _check(d['ints'], 'at') - _check(d['labels'], 'at') - _check(d['ts'], 'at') - _check(d['floats'], 'at') + for f in [d['ints'], d['uints'], d['labels'], + d['ts'], d['floats']]: + _check(f, 'at') def test_at_and_iat_set(self): def _check(f, func, values=False): @@ -334,16 +344,18 @@ def _check(f, func, values=False): d = getattr(self, t) - _check(d['ints'], 'iat', values=True) + # iat + for f in [d['ints'], d['uints']]: + _check(f, 'iat', values=True) + for f in [d['labels'], d['ts'], d['floats']]: 
if f is not None: self.assertRaises(ValueError, _check, f, 'iat') # at - _check(d['ints'], 'at') - _check(d['labels'], 'at') - _check(d['ts'], 'at') - _check(d['floats'], 'at') + for f in [d['ints'], d['uints'], d['labels'], + d['ts'], d['floats']]: + _check(f, 'at') def test_at_iat_coercion(self): @@ -508,7 +520,7 @@ def test_iloc_getitem_int(self): # integer self.check_result('integer', 'iloc', 2, 'ix', - {0: 4, 1: 6, 2: 8}, typs=['ints']) + {0: 4, 1: 6, 2: 8}, typs=['ints', 'uints']) self.check_result('integer', 'iloc', 2, 'indexer', 2, typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) @@ -517,7 +529,7 @@ def test_iloc_getitem_neg_int(self): # neg integer self.check_result('neg int', 'iloc', -1, 'ix', - {0: 6, 1: 9, 2: 12}, typs=['ints']) + {0: 6, 1: 9, 2: 12}, typs=['ints', 'uints']) self.check_result('neg int', 'iloc', -1, 'indexer', -1, typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) @@ -527,9 +539,9 @@ def test_iloc_getitem_list_int(self): # list of ints self.check_result('list int', 'iloc', [0, 1, 2], 'ix', {0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]}, - typs=['ints']) + typs=['ints', 'uints']) self.check_result('list int', 'iloc', [2], 'ix', - {0: [4], 1: [6], 2: [8]}, typs=['ints']) + {0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints']) self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2], typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) @@ -539,9 +551,9 @@ def test_iloc_getitem_list_int(self): self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix', {0: [0, 2, 4], 1: [0, 3, 6], - 2: [0, 4, 8]}, typs=['ints']) + 2: [0, 4, 8]}, typs=['ints', 'uints']) self.check_result('array int', 'iloc', np.array([2]), 'ix', - {0: [4], 1: [6], 2: [8]}, typs=['ints']) + {0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints']) self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer', [0, 1, 2], typs=['labels', 'mixed', 'ts', 'floats', 'empty'], @@ -579,7 +591,7 @@ def 
test_iloc_getitem_dups(self): # no dups in panel (bug?) self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix', {0: [0, 2, 2, 6], 1: [0, 3, 3, 9]}, - objs=['series', 'frame'], typs=['ints']) + objs=['series', 'frame'], typs=['ints', 'uints']) # GH 6766 df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}]) @@ -601,13 +613,13 @@ def test_iloc_getitem_array(self): s = Series(index=lrange(1, 4)) self.check_result('array like', 'iloc', s.index, 'ix', {0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]}, - typs=['ints']) + typs=['ints', 'uints']) def test_iloc_getitem_bool(self): # boolean indexers b = [True, False, True, False, ] - self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints']) + self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints']) self.check_result('bool', 'iloc', b, 'ix', b, typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) @@ -617,7 +629,7 @@ def test_iloc_getitem_slice(self): # slices self.check_result('slice', 'iloc', slice(1, 3), 'ix', {0: [2, 4], 1: [3, 6], 2: [4, 8]}, - typs=['ints']) + typs=['ints', 'uints']) self.check_result('slice', 'iloc', slice(1, 3), 'indexer', slice(1, 3), typs=['labels', 'mixed', 'ts', 'floats', 'empty'], @@ -1124,14 +1136,14 @@ def check(result, expected): def test_loc_getitem_int(self): # int label - self.check_result('int label', 'loc', 2, 'ix', 2, typs=['ints'], - axes=0) - self.check_result('int label', 'loc', 3, 'ix', 3, typs=['ints'], - axes=1) - self.check_result('int label', 'loc', 4, 'ix', 4, typs=['ints'], - axes=2) - self.check_result('int label', 'loc', 2, 'ix', 2, typs=['label'], - fails=KeyError) + self.check_result('int label', 'loc', 2, 'ix', 2, + typs=['ints', 'uints'], axes=0) + self.check_result('int label', 'loc', 3, 'ix', 3, + typs=['ints', 'uints'], axes=1) + self.check_result('int label', 'loc', 4, 'ix', 4, + typs=['ints', 'uints'], axes=2) + self.check_result('int label', 'loc', 2, 'ix', 2, + typs=['label'], fails=KeyError) def test_loc_getitem_label(self): @@ 
-1150,12 +1162,12 @@ def test_loc_getitem_label_out_of_range(self): # out of range label self.check_result('label range', 'loc', 'f', 'ix', 'f', - typs=['ints', 'labels', 'mixed', 'ts'], + typs=['ints', 'uints', 'labels', 'mixed', 'ts'], fails=KeyError) self.check_result('label range', 'loc', 'f', 'ix', 'f', typs=['floats'], fails=TypeError) self.check_result('label range', 'loc', 20, 'ix', 20, - typs=['ints', 'mixed'], fails=KeyError) + typs=['ints', 'uints', 'mixed'], fails=KeyError) self.check_result('label range', 'loc', 20, 'ix', 20, typs=['labels'], fails=TypeError) self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'], @@ -1167,11 +1179,11 @@ def test_loc_getitem_label_list(self): # list of labels self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4], - typs=['ints'], axes=0) + typs=['ints', 'uints'], axes=0) self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9], - typs=['ints'], axes=1) + typs=['ints', 'uints'], axes=1) self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12], - typs=['ints'], axes=2) + typs=['ints', 'uints'], axes=2) self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix', ['a', 'b', 'd'], typs=['labels'], axes=0) self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix', @@ -1188,27 +1200,27 @@ def test_loc_getitem_label_list(self): self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2], typs=['empty'], fails=KeyError) self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3], - typs=['ints'], axes=0, fails=KeyError) + typs=['ints', 'uints'], axes=0, fails=KeyError) self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7], - typs=['ints'], axes=1, fails=KeyError) + typs=['ints', 'uints'], axes=1, fails=KeyError) self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10], - typs=['ints'], axes=2, fails=KeyError) + typs=['ints', 'uints'], axes=2, fails=KeyError) def test_loc_getitem_label_list_fails(self): # fails self.check_result('list lbl', 'loc', [20, 30, 
40], 'ix', [20, 30, 40], - typs=['ints'], axes=1, fails=KeyError) + typs=['ints', 'uints'], axes=1, fails=KeyError) self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40], - typs=['ints'], axes=2, fails=KeyError) + typs=['ints', 'uints'], axes=2, fails=KeyError) def test_loc_getitem_label_array_like(self): # array like self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index, - 'ix', [0, 2, 4], typs=['ints'], axes=0) + 'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0) self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index, - 'ix', [3, 6, 9], typs=['ints'], axes=1) + 'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1) self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index, - 'ix', [4, 8, 12], typs=['ints'], axes=2) + 'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2) def test_loc_getitem_series(self): # GH14730 @@ -1236,7 +1248,8 @@ def test_loc_getitem_bool(self): # boolean indexers b = [True, False, True, False] self.check_result('bool', 'loc', b, 'ix', b, - typs=['ints', 'labels', 'mixed', 'ts', 'floats']) + typs=['ints', 'uints', 'labels', + 'mixed', 'ts', 'floats']) self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'], fails=KeyError) @@ -1244,11 +1257,11 @@ def test_loc_getitem_int_slice(self): # ok self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4], - typs=['ints'], axes=0) + typs=['ints', 'uints'], axes=0) self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6], - typs=['ints'], axes=1) + typs=['ints', 'uints'], axes=1) self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8], - typs=['ints'], axes=2) + typs=['ints', 'uints'], axes=2) # GH 3053 # loc should treat integer slices like label slices diff --git a/pandas/tests/types/test_generic.py b/pandas/tests/types/test_generic.py index 89913de6f6069..28600687e8062 100644 --- a/pandas/tests/types/test_generic.py +++ b/pandas/tests/types/test_generic.py @@ -24,6 +24,7 @@ class TestABCClasses(tm.TestCase): def 
test_abc_types(self): self.assertIsInstance(pd.Index(['a', 'b', 'c']), gt.ABCIndex) self.assertIsInstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index) + self.assertIsInstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index) self.assertIsInstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index) self.assertIsInstance(self.multi_index, gt.ABCMultiIndex) self.assertIsInstance(self.datetime_index, gt.ABCDatetimeIndex) diff --git a/pandas/types/generic.py b/pandas/types/generic.py index 0d576eed43d45..86d266f4595e2 100644 --- a/pandas/types/generic.py +++ b/pandas/types/generic.py @@ -16,6 +16,8 @@ def _check(cls, inst): ABCIndex = create_pandas_abc_type("ABCIndex", "_typ", ("index", )) ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ", ("int64index", )) +ABCUInt64Index = create_pandas_abc_type("ABCUInt64Index", "_typ", + ("uint64index", )) ABCRangeIndex = create_pandas_abc_type("ABCRangeIndex", "_typ", ("rangeindex", )) ABCFloat64Index = create_pandas_abc_type("ABCFloat64Index", "_typ", @@ -32,7 +34,7 @@ def _check(cls, inst): ("categoricalindex", )) ABCIndexClass = create_pandas_abc_type("ABCIndexClass", "_typ", ("index", "int64index", "rangeindex", - "float64index", + "float64index", "uint64index", "multiindex", "datetimeindex", "timedeltaindex", "periodindex", "categoricalindex")) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index d96f57f2810e3..d39ce7acf0029 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1573,6 +1573,10 @@ def makeIntIndex(k=10, name=None): return Index(lrange(k), name=name) +def makeUIntIndex(k=10, name=None): + return Index([2**63 + i for i in lrange(k)], name=name) + + def makeRangeIndex(k=10, name=None): return RangeIndex(0, k, 1, name=name) diff --git a/setup.py b/setup.py index 0a84cf527bfb1..a53464f8f7987 100755 --- a/setup.py +++ b/setup.py @@ -490,7 +490,8 @@ def pxd(name): index={'pyxfile': 'index', 'sources': ['pandas/src/datetime/np_datetime.c', 'pandas/src/datetime/np_datetime_strings.c'], - 
'pxdfiles': ['src/util']}, + 'pxdfiles': ['src/util'], + 'depends': _pxi_dep['index']}, algos={'pyxfile': 'algos', 'pxdfiles': ['src/util'], 'depends': _pxi_dep['algos']},
1) Introduces and propagates `UInt64Index`, an index specifically for `uint`. xref #14935 2) <strike> Patches bug from #14916 that makes `maybe_convert_objects` robust against the known `numpy` bug that `uint64` cannot be compared to `int64`. This bug was caught during testing of `UInt64Index`. </strike> **UPDATE**: Patched in #14951
https://api.github.com/repos/pandas-dev/pandas/pulls/14937
2016-12-21T08:03:45Z
2017-01-17T13:53:58Z
null
2017-01-18T05:10:19Z
TST: Concat MultiIndex dataframes with deepcopy (#9967)
diff --git a/pandas/tools/tests/test_concat.py b/pandas/tools/tests/test_concat.py index f541de316661a..172eee99b7c6b 100644 --- a/pandas/tools/tests/test_concat.py +++ b/pandas/tools/tests/test_concat.py @@ -2151,6 +2151,27 @@ def test_concat_multiindex_rangeindex(self): exp = df.iloc[[2, 3, 4, 5], :] tm.assert_frame_equal(res, exp) + def test_concat_multiindex_dfs_with_deepcopy(self): + # GH 9967 + from copy import deepcopy + example_multiindex1 = pd.MultiIndex.from_product([['a'], ['b']]) + example_dataframe1 = pd.DataFrame([0], index=example_multiindex1) + + example_multiindex2 = pd.MultiIndex.from_product([['a'], ['c']]) + example_dataframe2 = pd.DataFrame([1], index=example_multiindex2) + + example_dict = {'s1': example_dataframe1, 's2': example_dataframe2} + expected_index = pd.MultiIndex(levels=[['s1', 's2'], + ['a'], + ['b', 'c']], + labels=[[0, 1], [0, 0], [0, 1]], + names=['testname', None, None]) + expected = pd.DataFrame([[0], [1]], index=expected_index) + result_copy = pd.concat(deepcopy(example_dict), names=['testname']) + tm.assert_frame_equal(result_copy, expected) + result_no_copy = pd.concat(example_dict, names=['testname']) + tm.assert_frame_equal(result_no_copy, expected) + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
- [x] closes #9967 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` Potentially related to #10638? Otherwise, don't think this was fixed recently
https://api.github.com/repos/pandas-dev/pandas/pulls/14936
2016-12-21T06:34:45Z
2016-12-22T11:25:52Z
2016-12-22T11:25:52Z
2017-12-20T02:03:39Z
BUG: Patch rank() uint64 behavior
diff --git a/pandas/algos.pyx b/pandas/algos.pyx index 04f3ac70bdf5c..32955fd0f465b 100644 --- a/pandas/algos.pyx +++ b/pandas/algos.pyx @@ -67,499 +67,6 @@ tiebreakers = { } -# ctypedef fused pvalue_t: -# float64_t -# int64_t -# object - -# from cython cimport floating, integral - -cdef _take_2d_float64(ndarray[float64_t, ndim=2] values, - object idx): - cdef: - Py_ssize_t i, j, N, K - ndarray[Py_ssize_t, ndim=2, cast=True] indexer = idx - ndarray[float64_t, ndim=2] result - object val - - N, K = (<object> values).shape - result = np.empty_like(values) - for i in range(N): - for j in range(K): - result[i, j] = values[i, indexer[i, j]] - return result - -cdef _take_2d_int64(ndarray[int64_t, ndim=2] values, - object idx): - cdef: - Py_ssize_t i, j, N, K - ndarray[Py_ssize_t, ndim=2, cast=True] indexer = idx - ndarray[int64_t, ndim=2] result - object val - - N, K = (<object> values).shape - result = np.empty_like(values) - for i in range(N): - for j in range(K): - result[i, j] = values[i, indexer[i, j]] - return result - -cdef _take_2d_object(ndarray[object, ndim=2] values, - object idx): - cdef: - Py_ssize_t i, j, N, K - ndarray[Py_ssize_t, ndim=2, cast=True] indexer = idx - ndarray[object, ndim=2] result - object val - - N, K = (<object> values).shape - result = values.copy() - for i in range(N): - for j in range(K): - result[i, j] = values[i, indexer[i, j]] - return result - - -def rank_1d_float64(object in_arr, ties_method='average', ascending=True, - na_option='keep', pct=False): - """ - Fast NaN-friendly version of scipy.stats.rankdata - """ - - cdef: - Py_ssize_t i, j, n, dups = 0, total_tie_count = 0 - ndarray[float64_t] sorted_data, ranks, values - ndarray[int64_t] argsorted - float64_t val, nan_value - float64_t sum_ranks = 0 - int tiebreak = 0 - bint keep_na = 0 - float count = 0.0 - tiebreak = tiebreakers[ties_method] - - values = np.asarray(in_arr).copy() - - keep_na = na_option == 'keep' - - if ascending ^ (na_option == 'top'): - nan_value = np.inf - 
else: - nan_value = -np.inf - mask = np.isnan(values) - np.putmask(values, mask, nan_value) - - n = len(values) - ranks = np.empty(n, dtype='f8') - - # py2.5/win32 hack, can't pass i8 - if tiebreak == TIEBREAK_FIRST: - # need to use a stable sort here - _as = values.argsort(kind='mergesort') - if not ascending: - tiebreak = TIEBREAK_FIRST_DESCENDING - else: - _as = values.argsort() - - if not ascending: - _as = _as[::-1] - - sorted_data = values.take(_as) - argsorted = _as.astype('i8') - - for i in range(n): - sum_ranks += i + 1 - dups += 1 - val = sorted_data[i] - if (val == nan_value) and keep_na: - ranks[argsorted[i]] = nan - continue - count += 1.0 - if i == n - 1 or sorted_data[i + 1] != val: - if tiebreak == TIEBREAK_AVERAGE: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = sum_ranks / dups - elif tiebreak == TIEBREAK_MIN: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = i - dups + 2 - elif tiebreak == TIEBREAK_MAX: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = i + 1 - elif tiebreak == TIEBREAK_FIRST: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = j + 1 - elif tiebreak == TIEBREAK_FIRST_DESCENDING: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = 2 * i - j - dups + 2 - elif tiebreak == TIEBREAK_DENSE: - total_tie_count += 1 - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = total_tie_count - sum_ranks = dups = 0 - if pct: - return ranks / count - else: - return ranks - - -def rank_1d_int64(object in_arr, ties_method='average', ascending=True, - na_option='keep', pct=False): - """ - Fast NaN-friendly version of scipy.stats.rankdata - """ - - cdef: - Py_ssize_t i, j, n, dups = 0, total_tie_count = 0 - ndarray[int64_t] sorted_data, values - ndarray[float64_t] ranks - ndarray[int64_t] argsorted - int64_t val, nan_value - float64_t sum_ranks = 0 - bint keep_na - int tiebreak = 0 - float count = 0.0 - tiebreak = tiebreakers[ties_method] - - keep_na = na_option == 'keep' - - 
values = np.asarray(in_arr) - - if ascending ^ (na_option == 'top'): - nan_value = np.iinfo('int64').max - else: - nan_value = np.iinfo('int64').min - - # unlike floats, which have np.inf, -np.inf, and np.nan - # ints do not - mask = values == iNaT - np.putmask(values, mask, nan_value) - - n = len(values) - ranks = np.empty(n, dtype='f8') - - # py2.5/win32 hack, can't pass i8 - if tiebreak == TIEBREAK_FIRST: - # need to use a stable sort here - _as = values.argsort(kind='mergesort') - if not ascending: - tiebreak = TIEBREAK_FIRST_DESCENDING - else: - _as = values.argsort() - - if not ascending: - _as = _as[::-1] - - sorted_data = values.take(_as) - argsorted = _as.astype('i8') - - for i in range(n): - sum_ranks += i + 1 - dups += 1 - val = sorted_data[i] - if (val == nan_value) and keep_na: - ranks[argsorted[i]] = nan - continue - count += 1.0 - if i == n - 1 or fabs(sorted_data[i + 1] - val) > 0: - if tiebreak == TIEBREAK_AVERAGE: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = sum_ranks / dups - elif tiebreak == TIEBREAK_MIN: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = i - dups + 2 - elif tiebreak == TIEBREAK_MAX: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = i + 1 - elif tiebreak == TIEBREAK_FIRST: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = j + 1 - elif tiebreak == TIEBREAK_FIRST_DESCENDING: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = 2 * i - j - dups + 2 - elif tiebreak == TIEBREAK_DENSE: - total_tie_count += 1 - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = total_tie_count - sum_ranks = dups = 0 - if pct: - return ranks / count - else: - return ranks - - -def rank_2d_float64(object in_arr, axis=0, ties_method='average', - ascending=True, na_option='keep', pct=False): - """ - Fast NaN-friendly version of scipy.stats.rankdata - """ - - cdef: - Py_ssize_t i, j, z, k, n, dups = 0, total_tie_count = 0 - ndarray[float64_t, ndim=2] ranks, values - ndarray[int64_t, 
ndim=2] argsorted - float64_t val, nan_value - float64_t sum_ranks = 0 - int tiebreak = 0 - bint keep_na = 0 - float count = 0.0 - - tiebreak = tiebreakers[ties_method] - - keep_na = na_option == 'keep' - - in_arr = np.asarray(in_arr) - - if axis == 0: - values = in_arr.T.copy() - else: - values = in_arr.copy() - - if ascending ^ (na_option == 'top'): - nan_value = np.inf - else: - nan_value = -np.inf - - np.putmask(values, np.isnan(values), nan_value) - - n, k = (<object> values).shape - ranks = np.empty((n, k), dtype='f8') - - if tiebreak == TIEBREAK_FIRST: - # need to use a stable sort here - _as = values.argsort(axis=1, kind='mergesort') - if not ascending: - tiebreak = TIEBREAK_FIRST_DESCENDING - else: - _as = values.argsort(1) - - if not ascending: - _as = _as[:, ::-1] - - values = _take_2d_float64(values, _as) - argsorted = _as.astype('i8') - - for i in range(n): - dups = sum_ranks = 0 - total_tie_count = 0 - count = 0.0 - for j in range(k): - sum_ranks += j + 1 - dups += 1 - val = values[i, j] - if val == nan_value and keep_na: - ranks[i, argsorted[i, j]] = nan - continue - count += 1.0 - if j == k - 1 or values[i, j + 1] != val: - if tiebreak == TIEBREAK_AVERAGE: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = sum_ranks / dups - elif tiebreak == TIEBREAK_MIN: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = j - dups + 2 - elif tiebreak == TIEBREAK_MAX: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = j + 1 - elif tiebreak == TIEBREAK_FIRST: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = z + 1 - elif tiebreak == TIEBREAK_FIRST_DESCENDING: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = 2 * j - z - dups + 2 - elif tiebreak == TIEBREAK_DENSE: - total_tie_count += 1 - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = total_tie_count - sum_ranks = dups = 0 - if pct: - ranks[i, :] /= count - if axis == 0: - return ranks.T - else: - return 
ranks - - -def rank_2d_int64(object in_arr, axis=0, ties_method='average', - ascending=True, na_option='keep', pct=False): - """ - Fast NaN-friendly version of scipy.stats.rankdata - """ - - cdef: - Py_ssize_t i, j, z, k, n, dups = 0, total_tie_count = 0 - ndarray[float64_t, ndim=2] ranks - ndarray[int64_t, ndim=2] argsorted - ndarray[int64_t, ndim=2, cast=True] values - int64_t val, nan_value - float64_t sum_ranks = 0 - bint keep_na = 0 - int tiebreak = 0 - float count = 0.0 - tiebreak = tiebreakers[ties_method] - - keep_na = na_option == 'keep' - - in_arr = np.asarray(in_arr) - - if axis == 0: - values = in_arr.T.copy() - else: - values = in_arr.copy() - - if ascending ^ (na_option == 'top'): - nan_value = np.iinfo('int64').max - else: - nan_value = np.iinfo('int64').min - - # unlike floats, which have np.inf, -np.inf, and np.nan - # ints do not - np.putmask(values, values == iNaT, nan_value) - - n, k = (<object> values).shape - ranks = np.empty((n, k), dtype='f8') - - if tiebreak == TIEBREAK_FIRST: - # need to use a stable sort here - _as = values.argsort(axis=1, kind='mergesort') - if not ascending: - tiebreak = TIEBREAK_FIRST_DESCENDING - else: - _as = values.argsort(1) - - if not ascending: - _as = _as[:, ::-1] - - values = _take_2d_int64(values, _as) - argsorted = _as.astype('i8') - - for i in range(n): - dups = sum_ranks = 0 - total_tie_count = 0 - count = 0.0 - for j in range(k): - sum_ranks += j + 1 - dups += 1 - val = values[i, j] - if val == nan_value and keep_na: - ranks[i, argsorted[i, j]] = nan - continue - count += 1.0 - if j == k - 1 or fabs(values[i, j + 1] - val) > FP_ERR: - if tiebreak == TIEBREAK_AVERAGE: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = sum_ranks / dups - elif tiebreak == TIEBREAK_MIN: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = j - dups + 2 - elif tiebreak == TIEBREAK_MAX: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = j + 1 - elif tiebreak == TIEBREAK_FIRST: - 
for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = z + 1 - elif tiebreak == TIEBREAK_FIRST_DESCENDING: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = 2 * j - z - dups + 2 - elif tiebreak == TIEBREAK_DENSE: - total_tie_count += 1 - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = total_tie_count - sum_ranks = dups = 0 - if pct: - ranks[i, :] /= count - if axis == 0: - return ranks.T - else: - return ranks - - -def rank_1d_generic(object in_arr, bint retry=1, ties_method='average', - ascending=True, na_option='keep', pct=False): - """ - Fast NaN-friendly version of scipy.stats.rankdata - """ - - cdef: - Py_ssize_t i, j, n, dups = 0, total_tie_count = 0 - ndarray[float64_t] ranks - ndarray sorted_data, values - ndarray[int64_t] argsorted - object val, nan_value - float64_t sum_ranks = 0 - int tiebreak = 0 - bint keep_na = 0 - float count = 0.0 - - tiebreak = tiebreakers[ties_method] - - keep_na = na_option == 'keep' - - values = np.array(in_arr, copy=True) - - if values.dtype != np.object_: - values = values.astype('O') - - if ascending ^ (na_option == 'top'): - # always greater than everything - nan_value = Infinity() - else: - nan_value = NegInfinity() - - mask = lib.isnullobj(values) - np.putmask(values, mask, nan_value) - - n = len(values) - ranks = np.empty(n, dtype='f8') - - # py2.5/win32 hack, can't pass i8 - try: - _as = values.argsort() - except TypeError: - if not retry: - raise - - valid_locs = (~mask).nonzero()[0] - ranks.put(valid_locs, rank_1d_generic(values.take(valid_locs), 0, - ties_method=ties_method, - ascending=ascending)) - np.putmask(ranks, mask, np.nan) - return ranks - - if not ascending: - _as = _as[::-1] - - sorted_data = values.take(_as) - argsorted = _as.astype('i8') - for i in range(n): - sum_ranks += i + 1 - dups += 1 - val = util.get_value_at(sorted_data, i) - if val is nan_value and keep_na: - ranks[argsorted[i]] = nan - continue - if (i == n - 1 or - 
are_diff(util.get_value_at(sorted_data, i + 1), val)): - count += 1.0 - if tiebreak == TIEBREAK_AVERAGE: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = sum_ranks / dups - elif tiebreak == TIEBREAK_MIN: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = i - dups + 2 - elif tiebreak == TIEBREAK_MAX: - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = i + 1 - elif tiebreak == TIEBREAK_FIRST: - raise ValueError('first not supported for non-numeric data') - elif tiebreak == TIEBREAK_DENSE: - total_tie_count += 1 - for j in range(i - dups + 1, i + 1): - ranks[argsorted[j]] = total_tie_count - sum_ranks = dups = 0 - if pct: - return ranks / count - else: - return ranks - cdef inline are_diff(object left, object right): try: return fabs(left - right) > FP_ERR @@ -589,122 +96,6 @@ class NegInfinity(object): __ge__ = lambda self, other: self is other -def rank_2d_generic(object in_arr, axis=0, ties_method='average', - ascending=True, na_option='keep', pct=False): - """ - Fast NaN-friendly version of scipy.stats.rankdata - """ - - cdef: - Py_ssize_t i, j, z, k, n, infs, dups = 0 - Py_ssize_t total_tie_count = 0 - ndarray[float64_t, ndim=2] ranks - ndarray[object, ndim=2] values - ndarray[int64_t, ndim=2] argsorted - object val, nan_value - float64_t sum_ranks = 0 - int tiebreak = 0 - bint keep_na = 0 - float count = 0.0 - - tiebreak = tiebreakers[ties_method] - - keep_na = na_option == 'keep' - - in_arr = np.asarray(in_arr) - - if axis == 0: - values = in_arr.T.copy() - else: - values = in_arr.copy() - - if values.dtype != np.object_: - values = values.astype('O') - - if ascending ^ (na_option == 'top'): - # always greater than everything - nan_value = Infinity() - else: - nan_value = NegInfinity() - - mask = lib.isnullobj2d(values) - np.putmask(values, mask, nan_value) - - n, k = (<object> values).shape - ranks = np.empty((n, k), dtype='f8') - - try: - _as = values.argsort(1) - except TypeError: - values = in_arr - for i in 
range(len(values)): - ranks[i] = rank_1d_generic(in_arr[i], - ties_method=ties_method, - ascending=ascending, - pct=pct) - if axis == 0: - return ranks.T - else: - return ranks - - if not ascending: - _as = _as[:, ::-1] - - values = _take_2d_object(values, _as) - argsorted = _as.astype('i8') - - for i in range(n): - dups = sum_ranks = infs = 0 - total_tie_count = 0 - count = 0.0 - for j in range(k): - val = values[i, j] - if val is nan_value and keep_na: - ranks[i, argsorted[i, j]] = nan - infs += 1 - continue - count += 1.0 - sum_ranks += (j - infs) + 1 - dups += 1 - if j == k - 1 or are_diff(values[i, j + 1], val): - if tiebreak == TIEBREAK_AVERAGE: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = sum_ranks / dups - elif tiebreak == TIEBREAK_MIN: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = j - dups + 2 - elif tiebreak == TIEBREAK_MAX: - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = j + 1 - elif tiebreak == TIEBREAK_FIRST: - raise ValueError('first not supported for ' - 'non-numeric data') - elif tiebreak == TIEBREAK_DENSE: - total_tie_count += 1 - for z in range(j - dups + 1, j + 1): - ranks[i, argsorted[i, z]] = total_tie_count - sum_ranks = dups = 0 - if pct: - ranks[i, :] /= count - if axis == 0: - return ranks.T - else: - return ranks - -# def _take_indexer_2d(ndarray[float64_t, ndim=2] values, -# ndarray[Py_ssize_t, ndim=2, cast=True] indexer): -# cdef: -# Py_ssize_t i, j, N, K -# ndarray[float64_t, ndim=2] result - -# N, K = (<object> values).shape -# result = np.empty_like(values) -# for i in range(N): -# for j in range(K): -# result[i, j] = values[i, indexer[i, j]] -# return result - - cdef inline Py_ssize_t swap(numeric *a, numeric *b) nogil except -1: cdef numeric t @@ -1269,4 +660,5 @@ cdef inline float64_t _median_linear(float64_t* a, int n): # generated from template include "algos_common_helper.pxi" include "algos_groupby_helper.pxi" +include "algos_rank_helper.pxi" include 
"algos_take_helper.pxi" diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 9b7bf2bf058ef..b4a61b26aceb3 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -68,7 +68,7 @@ def match(to_match, values, na_sentinel=-1): if issubclass(values.dtype.type, string_types): values = np.array(values, dtype='O') - f = lambda htype, caster: _match_generic(to_match, values, htype, caster) + f = lambda htype, caster: _match_object(to_match, values, htype, caster) result = _hashtable_algo(f, values, np.int64) if na_sentinel != -1: @@ -82,7 +82,7 @@ def match(to_match, values, na_sentinel=-1): return result -def _match_generic(values, index, table_type, type_caster): +def _match_object(values, index, table_type, type_caster): values = type_caster(values) index = type_caster(index) table = table_type(min(len(index), 1000000)) @@ -105,11 +105,11 @@ def unique(values): """ values = com._asarray_tuplesafe(values) - f = lambda htype, caster: _unique_generic(values, htype, caster) + f = lambda htype, caster: _unique_object(values, htype, caster) return _hashtable_algo(f, values) -def _unique_generic(values, table_type, type_caster): +def _unique_object(values, table_type, type_caster): values = type_caster(values) table = table_type(min(len(values), 1000000)) uniques = table.unique(values) @@ -366,6 +366,9 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None): if isinstance(values, Index): uniques = values._shallow_copy(uniques, name=None) elif isinstance(values, Series): + # TODO: This constructor is bugged for uint's, especially + # np.uint64 due to overflow. Test this for uint behavior + # once constructor has been fixed. uniques = Index(uniques) return labels, uniques @@ -595,7 +598,27 @@ def mode(values): def rank(values, axis=0, method='average', na_option='keep', ascending=True, pct=False): """ + Rank the values along a given axis. 
+ Parameters + ---------- + values : array-like + Array whose values will be ranked. The number of dimensions in this + array must not exceed 2. + axis : int, default 0 + Axis over which to perform rankings. + method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' + The method by which tiebreaks are broken during the ranking. + na_option : {'keep', 'top'}, default 'keep' + The method by which NaNs are placed in the ranking. + - ``keep``: rank each NaN value with a NaN ranking + - ``top``: replace each NaN with either +/- inf so that they + there are ranked at the top + ascending : boolean, default True + Whether or not the elements should be ranked in ascending order. + pct : boolean, default False + Whether or not to the display the returned rankings in integer form + (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1). """ if values.ndim == 1: f, values = _get_data_algo(values, _rank1d_functions) @@ -605,6 +628,8 @@ def rank(values, axis=0, method='average', na_option='keep', f, values = _get_data_algo(values, _rank2d_functions) ranks = f(values, axis=axis, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) + else: + raise TypeError("Array with ndim > 2 are not supported.") return ranks @@ -700,13 +725,15 @@ def _broadcast(arr_or_scalar, shape): _rank1d_functions = { 'float64': algos.rank_1d_float64, 'int64': algos.rank_1d_int64, - 'generic': algos.rank_1d_generic + 'uint64': algos.rank_1d_uint64, + 'object': algos.rank_1d_object } _rank2d_functions = { 'float64': algos.rank_2d_float64, 'int64': algos.rank_2d_int64, - 'generic': algos.rank_2d_generic + 'uint64': algos.rank_2d_uint64, + 'object': algos.rank_2d_object } @@ -934,9 +961,10 @@ def _hashtable_algo(f, values, return_dtype=None): _hashtables = { 'float64': (htable.Float64HashTable, htable.Float64Vector), + 'uint64': (htable.UInt64HashTable, htable.UInt64Vector), 'int64': (htable.Int64HashTable, htable.Int64Vector), 'string': (htable.StringHashTable, 
htable.ObjectVector), - 'generic': (htable.PyObjectHashTable, htable.ObjectVector) + 'object': (htable.PyObjectHashTable, htable.ObjectVector) } @@ -951,11 +979,15 @@ def _get_data_algo(values, func_map): f = func_map['int64'] values = values.view('i8') - elif is_integer_dtype(values): + elif is_signed_integer_dtype(values): f = func_map['int64'] values = _ensure_int64(values) - else: + elif is_unsigned_integer_dtype(values): + f = func_map['uint64'] + values = _ensure_uint64(values) + + else: values = _ensure_object(values) # its cheaper to use a String Hash Table than Object @@ -966,7 +998,7 @@ def _get_data_algo(values, func_map): pass if f is None: - f = func_map['generic'] + f = func_map['object'] return f, values @@ -997,7 +1029,7 @@ def wrapper(arr, indexer, out, fill_value=np.nan): return wrapper -def _take_2d_multi_generic(arr, indexer, out, fill_value, mask_info): +def _take_2d_multi_object(arr, indexer, out, fill_value, mask_info): # this is not ideal, performance-wise, but it's better than raising # an exception (best to optimize in Cython to avoid getting here) row_idx, col_idx = indexer @@ -1020,7 +1052,7 @@ def _take_2d_multi_generic(arr, indexer, out, fill_value, mask_info): out[i, j] = arr[u_, v] -def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info): +def _take_nd_object(arr, indexer, out, axis, fill_value, mask_info): if mask_info is not None: mask, needs_masking = mask_info else: @@ -1171,8 +1203,8 @@ def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None): def func(arr, indexer, out, fill_value=np.nan): indexer = _ensure_int64(indexer) - _take_nd_generic(arr, indexer, out, axis=axis, fill_value=fill_value, - mask_info=mask_info) + _take_nd_object(arr, indexer, out, axis=axis, fill_value=fill_value, + mask_info=mask_info) return func @@ -1343,8 +1375,8 @@ def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None, if func is None: def func(arr, indexer, out, fill_value=np.nan): - 
_take_2d_multi_generic(arr, indexer, out, fill_value=fill_value, - mask_info=mask_info) + _take_2d_multi_object(arr, indexer, out, fill_value=fill_value, + mask_info=mask_info) func(arr, indexer, out=out, fill_value=fill_value) return out diff --git a/pandas/src/algos_rank_helper.pxi.in b/pandas/src/algos_rank_helper.pxi.in new file mode 100644 index 0000000000000..7e7f819c7515f --- /dev/null +++ b/pandas/src/algos_rank_helper.pxi.in @@ -0,0 +1,385 @@ +""" +Template for each `dtype` helper function for rank + +WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in +""" + +#---------------------------------------------------------------------- +# rank_1d, rank_2d +#---------------------------------------------------------------------- + +{{py: + +# dtype ctype pos_nan_value neg_nan_value +dtypes = [('object', 'object', 'Infinity()', 'NegInfinity()'), + ('float64', 'float64_t', 'np.inf', '-np.inf'), + ('uint64', 'uint64_t', '', ''), + ('int64', 'int64_t', 'np.iinfo(np.int64).max', + 'np.iinfo(np.int64).min')] + +}} + +{{for dtype, ctype, pos_nan_value, neg_nan_value in dtypes}} + + +@cython.wraparound(False) +@cython.boundscheck(False) +{{if dtype == 'object'}} + + +def rank_1d_{{dtype}}(object in_arr, bint retry=1, ties_method='average', + ascending=True, na_option='keep', pct=False): +{{else}} + + +def rank_1d_{{dtype}}(object in_arr, ties_method='average', ascending=True, + na_option='keep', pct=False): +{{endif}} + """ + Fast NaN-friendly version of scipy.stats.rankdata + """ + + cdef: + Py_ssize_t i, j, n, dups = 0, total_tie_count = 0 + + {{if dtype == 'object'}} + ndarray sorted_data, values + {{else}} + ndarray[{{ctype}}] sorted_data, values + {{endif}} + + ndarray[float64_t] ranks + ndarray[int64_t] argsorted + + {{if dtype == 'uint64'}} + {{ctype}} val + {{else}} + {{ctype}} val, nan_value + {{endif}} + + float64_t sum_ranks = 0 + int tiebreak = 0 + bint keep_na = 0 + float count = 0.0 + tiebreak = tiebreakers[ties_method] + + {{if dtype == 
'float64'}} + values = np.asarray(in_arr).copy() + {{elif dtype == 'object'}} + values = np.array(in_arr, copy=True) + + if values.dtype != np.object_: + values = values.astype('O') + {{else}} + values = np.asarray(in_arr) + {{endif}} + + keep_na = na_option == 'keep' + + {{if dtype != 'uint64'}} + if ascending ^ (na_option == 'top'): + nan_value = {{pos_nan_value}} + else: + nan_value = {{neg_nan_value}} + + {{if dtype == 'object'}} + mask = lib.isnullobj(values) + {{elif dtype == 'float64'}} + mask = np.isnan(values) + {{elif dtype == 'int64'}} + mask = values == iNaT + {{endif}} + + np.putmask(values, mask, nan_value) + {{endif}} + + n = len(values) + ranks = np.empty(n, dtype='f8') + + {{if dtype == 'object'}} + try: + _as = values.argsort() + except TypeError: + if not retry: + raise + + valid_locs = (~mask).nonzero()[0] + ranks.put(valid_locs, rank_1d_object(values.take(valid_locs), 0, + ties_method=ties_method, + ascending=ascending)) + np.putmask(ranks, mask, np.nan) + return ranks + {{else}} + if tiebreak == TIEBREAK_FIRST: + # need to use a stable sort here + _as = values.argsort(kind='mergesort') + if not ascending: + tiebreak = TIEBREAK_FIRST_DESCENDING + else: + _as = values.argsort() + {{endif}} + + if not ascending: + _as = _as[::-1] + + sorted_data = values.take(_as) + argsorted = _as.astype('i8') + + {{if dtype == 'object'}} + for i in range(n): + sum_ranks += i + 1 + dups += 1 + + val = util.get_value_at(sorted_data, i) + + if (val is nan_value) and keep_na: + ranks[argsorted[i]] = nan + continue + + count += 1.0 + + if (i == n - 1 or + are_diff(util.get_value_at(sorted_data, i + 1), val)): + if tiebreak == TIEBREAK_AVERAGE: + for j in range(i - dups + 1, i + 1): + ranks[argsorted[j]] = sum_ranks / dups + elif tiebreak == TIEBREAK_MIN: + for j in range(i - dups + 1, i + 1): + ranks[argsorted[j]] = i - dups + 2 + elif tiebreak == TIEBREAK_MAX: + for j in range(i - dups + 1, i + 1): + ranks[argsorted[j]] = i + 1 + elif tiebreak == TIEBREAK_FIRST: + 
raise ValueError('first not supported for non-numeric data') + elif tiebreak == TIEBREAK_FIRST_DESCENDING: + for j in range(i - dups + 1, i + 1): + ranks[argsorted[j]] = 2 * i - j - dups + 2 + elif tiebreak == TIEBREAK_DENSE: + total_tie_count += 1 + for j in range(i - dups + 1, i + 1): + ranks[argsorted[j]] = total_tie_count + sum_ranks = dups = 0 + {{else}} + with nogil: + for i in range(n): + sum_ranks += i + 1 + dups += 1 + + val = sorted_data[i] + + {{if dtype != 'uint64'}} + if (val == nan_value) and keep_na: + ranks[argsorted[i]] = nan + continue + {{endif}} + + count += 1.0 + + {{if dtype == 'float64'}} + if i == n - 1 or sorted_data[i + 1] != val: + {{else}} + if i == n - 1 or fabs(sorted_data[i + 1] - val) > 0: + {{endif}} + if tiebreak == TIEBREAK_AVERAGE: + for j in range(i - dups + 1, i + 1): + ranks[argsorted[j]] = sum_ranks / dups + elif tiebreak == TIEBREAK_MIN: + for j in range(i - dups + 1, i + 1): + ranks[argsorted[j]] = i - dups + 2 + elif tiebreak == TIEBREAK_MAX: + for j in range(i - dups + 1, i + 1): + ranks[argsorted[j]] = i + 1 + elif tiebreak == TIEBREAK_FIRST: + for j in range(i - dups + 1, i + 1): + ranks[argsorted[j]] = j + 1 + elif tiebreak == TIEBREAK_FIRST_DESCENDING: + for j in range(i - dups + 1, i + 1): + ranks[argsorted[j]] = 2 * i - j - dups + 2 + elif tiebreak == TIEBREAK_DENSE: + total_tie_count += 1 + for j in range(i - dups + 1, i + 1): + ranks[argsorted[j]] = total_tie_count + sum_ranks = dups = 0 + {{endif}} + if pct: + return ranks / count + else: + return ranks + + +def rank_2d_{{dtype}}(object in_arr, axis=0, ties_method='average', + ascending=True, na_option='keep', pct=False): + """ + Fast NaN-friendly version of scipy.stats.rankdata + """ + + cdef: + Py_ssize_t i, j, z, k, n, dups = 0, total_tie_count = 0 + + {{if dtype == 'object'}} + Py_ssize_t infs + {{endif}} + + ndarray[float64_t, ndim=2] ranks + {{if dtype == 'int64' or dtype == 'uint64'}} + ndarray[{{ctype}}, ndim=2, cast=True] values + {{else}} + 
ndarray[{{ctype}}, ndim=2] values + {{endif}} + + ndarray[int64_t, ndim=2] argsorted + + {{if dtype == 'uint64'}} + {{ctype}} val + {{else}} + {{ctype}} val, nan_value + {{endif}} + + float64_t sum_ranks = 0 + int tiebreak = 0 + bint keep_na = 0 + float count = 0.0 + + tiebreak = tiebreakers[ties_method] + + keep_na = na_option == 'keep' + + in_arr = np.asarray(in_arr) + + if axis == 0: + values = in_arr.T.copy() + else: + values = in_arr.copy() + + {{if dtype == 'object'}} + if values.dtype != np.object_: + values = values.astype('O') + {{endif}} + + {{if dtype != 'uint64'}} + if ascending ^ (na_option == 'top'): + nan_value = {{pos_nan_value}} + else: + nan_value = {{neg_nan_value}} + + {{if dtype == 'object'}} + mask = lib.isnullobj2d(values) + {{elif dtype == 'float64'}} + mask = np.isnan(values) + {{elif dtype == 'int64'}} + mask = values == iNaT + {{endif}} + + np.putmask(values, mask, nan_value) + {{endif}} + + n, k = (<object> values).shape + ranks = np.empty((n, k), dtype='f8') + + {{if dtype == 'object'}} + try: + _as = values.argsort(1) + except TypeError: + values = in_arr + for i in range(len(values)): + ranks[i] = rank_1d_object(in_arr[i], ties_method=ties_method, + ascending=ascending, pct=pct) + if axis == 0: + return ranks.T + else: + return ranks + {{else}} + if tiebreak == TIEBREAK_FIRST: + # need to use a stable sort here + _as = values.argsort(axis=1, kind='mergesort') + if not ascending: + tiebreak = TIEBREAK_FIRST_DESCENDING + else: + _as = values.argsort(1) + {{endif}} + + if not ascending: + _as = _as[:, ::-1] + + values = _take_2d_{{dtype}}(values, _as) + argsorted = _as.astype('i8') + + for i in range(n): + {{if dtype == 'object'}} + dups = sum_ranks = infs = 0 + {{else}} + dups = sum_ranks = 0 + {{endif}} + + total_tie_count = 0 + count = 0.0 + for j in range(k): + {{if dtype != 'object'}} + sum_ranks += j + 1 + dups += 1 + {{endif}} + + val = values[i, j] + + {{if dtype != 'uint64'}} + {{if dtype == 'object'}} + if (val is nan_value) 
and keep_na: + {{else}} + if (val == nan_value) and keep_na: + {{endif}} + ranks[i, argsorted[i, j]] = nan + + {{if dtype == 'object'}} + infs += 1 + {{endif}} + + continue + {{endif}} + + count += 1.0 + + {{if dtype == 'object'}} + sum_ranks += (j - infs) + 1 + dups += 1 + {{endif}} + + {{if dtype == 'object'}} + if j == k - 1 or are_diff(values[i, j + 1], val): + {{elif dtype == 'float64'}} + if j == k - 1 or values[i, j + 1] != val: + {{else}} + if j == k - 1 or fabs(values[i, j + 1] - val) > FP_ERR: + {{endif}} + if tiebreak == TIEBREAK_AVERAGE: + for z in range(j - dups + 1, j + 1): + ranks[i, argsorted[i, z]] = sum_ranks / dups + elif tiebreak == TIEBREAK_MIN: + for z in range(j - dups + 1, j + 1): + ranks[i, argsorted[i, z]] = j - dups + 2 + elif tiebreak == TIEBREAK_MAX: + for z in range(j - dups + 1, j + 1): + ranks[i, argsorted[i, z]] = j + 1 + elif tiebreak == TIEBREAK_FIRST: + {{if dtype == 'object'}} + raise ValueError('first not supported ' + 'for non-numeric data') + {{else}} + for z in range(j - dups + 1, j + 1): + ranks[i, argsorted[i, z]] = z + 1 + {{endif}} + elif tiebreak == TIEBREAK_FIRST_DESCENDING: + for z in range(j - dups + 1, j + 1): + ranks[i, argsorted[i, z]] = 2 * j - z - dups + 2 + elif tiebreak == TIEBREAK_DENSE: + total_tie_count += 1 + for z in range(j - dups + 1, j + 1): + ranks[i, argsorted[i, z]] = total_tie_count + sum_ranks = dups = 0 + if pct: + ranks[i, :] /= count + if axis == 0: + return ranks.T + else: + return ranks + +{{endfor}} diff --git a/pandas/src/algos_take_helper.pxi.in b/pandas/src/algos_take_helper.pxi.in index e9abbcd13f499..71bb1bb4fe9be 100644 --- a/pandas/src/algos_take_helper.pxi.in +++ b/pandas/src/algos_take_helper.pxi.in @@ -258,4 +258,35 @@ def take_2d_multi_{{name}}_{{dest}}(ndarray[{{c_type_in}}, ndim=2] values, else: out[i, j] = {{preval}}values[idx, idx1[j]]{{postval}} -{{endfor}} \ No newline at end of file +{{endfor}} + +#---------------------------------------------------------------------- +# 
take_2d internal function +#---------------------------------------------------------------------- + +{{py: + +# dtype, ctype, init_result +dtypes = [('float64', 'float64_t', 'np.empty_like(values)'), + ('uint64', 'uint64_t', 'np.empty_like(values)'), + ('object', 'object', 'values.copy()'), + ('int64', 'int64_t', 'np.empty_like(values)')] +}} + +{{for dtype, ctype, init_result in dtypes}} + +cdef _take_2d_{{dtype}}(ndarray[{{ctype}}, ndim=2] values, object idx): + cdef: + Py_ssize_t i, j, N, K + ndarray[Py_ssize_t, ndim=2, cast=True] indexer = idx + ndarray[{{ctype}}, ndim=2] result + object val + + N, K = (<object> values).shape + result = {{init_result}} + for i in range(N): + for j in range(K): + result[i, j] = values[i, indexer[i, j]] + return result + +{{endfor}} diff --git a/pandas/src/hashtable_class_helper.pxi.in b/pandas/src/hashtable_class_helper.pxi.in index 55c840b20c78b..b26839599ef38 100644 --- a/pandas/src/hashtable_class_helper.pxi.in +++ b/pandas/src/hashtable_class_helper.pxi.in @@ -204,7 +204,7 @@ cdef class HashTable: # name, dtype, null_condition, float_group dtypes = [('Float64', 'float64', 'val != val', True), - ('UInt64', 'uint64', 'val == 0', False), + ('UInt64', 'uint64', 'False', False), ('Int64', 'int64', 'val == iNaT', False)] }} diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index e360089928000..75dd887c9d290 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -969,21 +969,44 @@ def test_unique_label_indices(): check_dtype=False) -def test_rank(): - tm._skip_if_no_scipy() - from scipy.stats import rankdata - - def _check(arr): - mask = ~np.isfinite(arr) - arr = arr.copy() - result = _algos.rank_1d_float64(arr) - arr[mask] = np.inf - exp = rankdata(arr) - exp[mask] = nan - assert_almost_equal(result, exp) - - _check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan])) - _check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan])) +class TestRank(tm.TestCase): + + def test_scipy_compat(self): 
+ tm._skip_if_no_scipy() + from scipy.stats import rankdata + + def _check(arr): + mask = ~np.isfinite(arr) + arr = arr.copy() + result = _algos.rank_1d_float64(arr) + arr[mask] = np.inf + exp = rankdata(arr) + exp[mask] = nan + assert_almost_equal(result, exp) + + _check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan])) + _check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan])) + + def test_basic(self): + exp = np.array([1, 2], dtype=np.float64) + + for dtype in np.typecodes['AllInteger']: + s = Series([1, 100], dtype=dtype) + tm.assert_numpy_array_equal(algos.rank(s), exp) + + def test_uint64_overflow(self): + exp = np.array([1, 2], dtype=np.float64) + + for dtype in [np.float64, np.uint64]: + s = Series([1, 2**63], dtype=dtype) + tm.assert_numpy_array_equal(algos.rank(s), exp) + + def test_too_many_ndims(self): + arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) + msg = "Array with ndim > 2 are not supported" + + with tm.assertRaisesRegexp(TypeError, msg): + algos.rank(arr) def test_pad_backfill_object_segfault(): diff --git a/setup.py b/setup.py index e3774d8e36ce9..0821a7d907e6c 100755 --- a/setup.py +++ b/setup.py @@ -112,7 +112,7 @@ def is_platform_mac(): _pxipath = pjoin('pandas', 'src') _pxi_dep_template = { 'algos': ['algos_common_helper.pxi.in', 'algos_groupby_helper.pxi.in', - 'algos_take_helper.pxi.in'], + 'algos_take_helper.pxi.in', 'algos_rank_helper.pxi.in'], '_join': ['join_helper.pxi.in', 'joins_func_helper.pxi.in'], 'hashtable': ['hashtable_class_helper.pxi.in', 'hashtable_func_helper.pxi.in'],
Adds `uint64` ranking functions to `algos.pyx` to allow for proper ranking with `uint64`. Also introduces partial patch for `factorize()` by adding `uint64` hashtables and vectors for usage. However, this patch is only partial because the larger bug of non-support for `uint64` in `Index` has not been fixed (**UPDATE**: tackled in #14937): ~~~python >>> from pandas import Index, np >>> Index(np.array([2**63], dtype=np.uint64)) Int64Index([-9223372036854775808], dtype='int64') ~~~ Also patches a bug in `UInt64HashTable` from #14915 that had an erroneous null condition that was caught during testing and was hence removed. Note there is overlap with #14934 with the implementation of `is_signed_integer_dtype` and `is_unsigned_integer_dtype`. That PR should be merged before this one.
https://api.github.com/repos/pandas-dev/pandas/pulls/14935
2016-12-21T04:25:45Z
2016-12-24T22:04:45Z
null
2016-12-24T22:05:33Z
BUG, TST: Check uint64 behaviour in algorithms.py
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index b32b9fbbab04e..ffd2fa90dc9e6 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -298,5 +298,6 @@ Bug Fixes - Bug in ``Series.unique()`` in which unsigned 64-bit integers were causing overflow (:issue:`14721`) +- Bug in ``pd.unique()`` in which unsigned 64-bit integers were causing overflow (:issue:`14915`) - Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`) - Bug in converting object elements of array-like objects to unsigned 64-bit integers (:issue:`4471`) diff --git a/pandas/api/tests/test_api.py b/pandas/api/tests/test_api.py index 49aa31c375e25..bc126447213ca 100644 --- a/pandas/api/tests/test_api.py +++ b/pandas/api/tests/test_api.py @@ -153,10 +153,10 @@ class TestTypes(Base, tm.TestCase): 'is_floating_dtype', 'is_int64_dtype', 'is_integer', 'is_integer_dtype', 'is_number', 'is_numeric_dtype', 'is_object_dtype', 'is_scalar', 'is_sparse', - 'is_string_dtype', + 'is_string_dtype', 'is_signed_integer_dtype', 'is_timedelta64_dtype', 'is_timedelta64_ns_dtype', - 'is_period', 'is_period_dtype', - 'is_re', 'is_re_compilable', + 'is_unsigned_integer_dtype', 'is_period', + 'is_period_dtype', 'is_re', 'is_re_compilable', 'is_dict_like', 'is_iterator', 'is_list_like', 'is_hashable', 'is_named_tuple', 'is_sequence', diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index e51774ce4d9b4..1a967bdd7a1a3 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -9,7 +9,9 @@ from pandas import compat, lib, tslib, _np_version_under1p8 from pandas.types.cast import _maybe_promote from pandas.types.generic import ABCSeries, ABCIndex -from pandas.types.common import (is_integer_dtype, +from pandas.types.common import (is_unsigned_integer_dtype, + is_signed_integer_dtype, + is_integer_dtype, is_int64_dtype, is_categorical_dtype, is_extension_type, @@ -479,8 +481,9 @@ def 
_value_counts_arraylike(values, dropna=True): keys, counts = htable.value_count_float64(values, dropna) else: values = _ensure_object(values) + keys, counts = htable.value_count_object(values, dropna) + mask = isnull(values) - keys, counts = htable.value_count_object(values, mask) if not dropna and mask.any(): keys = np.insert(keys, 0, np.NaN) counts = np.insert(counts, 0, mask.sum()) @@ -490,12 +493,14 @@ def _value_counts_arraylike(values, dropna=True): def duplicated(values, keep='first'): """ - Return boolean ndarray denoting duplicate values + Return boolean ndarray denoting duplicate values. .. versionadded:: 0.19.0 Parameters ---------- + values : ndarray-like + Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. @@ -521,9 +526,12 @@ def duplicated(values, keep='first'): elif isinstance(values, (ABCSeries, ABCIndex)): values = values.values - if is_integer_dtype(dtype): + if is_signed_integer_dtype(dtype): values = _ensure_int64(values) duplicated = htable.duplicated_int64(values, keep=keep) + elif is_unsigned_integer_dtype(dtype): + values = _ensure_uint64(values) + duplicated = htable.duplicated_uint64(values, keep=keep) elif is_float_dtype(dtype): values = _ensure_float64(values) duplicated = htable.duplicated_float64(values, keep=keep) @@ -535,7 +543,19 @@ def duplicated(values, keep='first'): def mode(values): - """Returns the mode or mode(s) of the passed Series or ndarray (sorted)""" + """ + Returns the mode(s) of an array. + + Parameters + ---------- + values : array-like + Array over which to check for duplicate values. + + Returns + ------- + mode : Series + """ + # must sort because hash order isn't necessarily defined. 
from pandas.core.series import Series @@ -547,23 +567,23 @@ def mode(values): constructor = Series dtype = values.dtype - if is_integer_dtype(values): + if is_signed_integer_dtype(values): values = _ensure_int64(values) - result = constructor(sorted(htable.mode_int64(values)), dtype=dtype) - + result = constructor(np.sort(htable.mode_int64(values)), dtype=dtype) + elif is_unsigned_integer_dtype(values): + values = _ensure_uint64(values) + result = constructor(np.sort(htable.mode_uint64(values)), dtype=dtype) elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)): dtype = values.dtype values = values.view(np.int64) - result = constructor(sorted(htable.mode_int64(values)), dtype=dtype) - + result = constructor(np.sort(htable.mode_int64(values)), dtype=dtype) elif is_categorical_dtype(values): result = constructor(values.mode()) else: - mask = isnull(values) values = _ensure_object(values) - res = htable.mode_object(values, mask) + res = htable.mode_object(values) try: - res = sorted(res) + res = np.sort(res) except TypeError as e: warn("Unable to sort modes: %s" % e) result = constructor(res, dtype=dtype) @@ -893,8 +913,10 @@ def _hashtable_algo(f, values, return_dtype=None): dtype = values.dtype if is_float_dtype(dtype): return f(htable.Float64HashTable, _ensure_float64) - elif is_integer_dtype(dtype): + elif is_signed_integer_dtype(dtype): return f(htable.Int64HashTable, _ensure_int64) + elif is_unsigned_integer_dtype(dtype): + return f(htable.UInt64HashTable, _ensure_uint64) elif is_datetime64_dtype(dtype): return_dtype = return_dtype or 'M8[ns]' return f(htable.Int64HashTable, _ensure_int64).view(return_dtype) diff --git a/pandas/hashtable.pyx b/pandas/hashtable.pyx index ce760b49fabc0..276b0679070dc 100644 --- a/pandas/hashtable.pyx +++ b/pandas/hashtable.pyx @@ -22,6 +22,8 @@ cdef extern from "numpy/npy_math.h": cimport cython cimport numpy as cnp +from pandas.lib import checknull + cnp.import_array() cnp.import_ufunc() @@ -117,165 +119,6 @@ cdef 
class Int64Factorizer: return labels -@cython.wraparound(False) -@cython.boundscheck(False) -cdef build_count_table_object(ndarray[object] values, - ndarray[uint8_t, cast=True] mask, - kh_pymap_t *table): - cdef: - khiter_t k - Py_ssize_t i, n = len(values) - int ret = 0 - - kh_resize_pymap(table, n // 10) - - for i in range(n): - if mask[i]: - continue - - val = values[i] - k = kh_get_pymap(table, <PyObject*> val) - if k != table.n_buckets: - table.vals[k] += 1 - else: - k = kh_put_pymap(table, <PyObject*> val, &ret) - table.vals[k] = 1 - - -@cython.wraparound(False) -@cython.boundscheck(False) -cpdef value_count_object(ndarray[object] values, - ndarray[uint8_t, cast=True] mask): - cdef: - Py_ssize_t i - kh_pymap_t *table - int k - - table = kh_init_pymap() - build_count_table_object(values, mask, table) - - i = 0 - result_keys = np.empty(table.n_occupied, dtype=object) - result_counts = np.zeros(table.n_occupied, dtype=np.int64) - for k in range(table.n_buckets): - if kh_exist_pymap(table, k): - result_keys[i] = <object> table.keys[k] - result_counts[i] = table.vals[k] - i += 1 - kh_destroy_pymap(table) - - return result_keys, result_counts - - -@cython.wraparound(False) -@cython.boundscheck(False) -def mode_object(ndarray[object] values, ndarray[uint8_t, cast=True] mask): - cdef: - int count, max_count = 2 - int j = -1 # so you can do += - int k - ndarray[object] modes - kh_pymap_t *table - - table = kh_init_pymap() - build_count_table_object(values, mask, table) - - modes = np.empty(table.n_buckets, dtype=np.object_) - for k in range(table.n_buckets): - if kh_exist_pymap(table, k): - count = table.vals[k] - - if count == max_count: - j += 1 - elif count > max_count: - max_count = count - j = 0 - else: - continue - modes[j] = <object> table.keys[k] - - kh_destroy_pymap(table) - - return modes[:j + 1] - - -@cython.wraparound(False) -@cython.boundscheck(False) -def mode_int64(int64_t[:] values): - cdef: - int count, max_count = 2 - int j = -1 # so you can do += - 
int k - kh_int64_t *table - ndarray[int64_t] modes - - table = kh_init_int64() - - build_count_table_int64(values, table, 0) - - modes = np.empty(table.n_buckets, dtype=np.int64) - - with nogil: - for k in range(table.n_buckets): - if kh_exist_int64(table, k): - count = table.vals[k] - - if count == max_count: - j += 1 - elif count > max_count: - max_count = count - j = 0 - else: - continue - modes[j] = table.keys[k] - - kh_destroy_int64(table) - - return modes[:j + 1] - - -@cython.wraparound(False) -@cython.boundscheck(False) -def duplicated_object(ndarray[object] values, object keep='first'): - cdef: - Py_ssize_t i, n - dict seen = dict() - object row - - n = len(values) - cdef ndarray[uint8_t] result = np.zeros(n, dtype=np.uint8) - - if keep == 'last': - for i from n > i >= 0: - row = values[i] - if row in seen: - result[i] = 1 - else: - seen[row] = i - result[i] = 0 - elif keep == 'first': - for i from 0 <= i < n: - row = values[i] - if row in seen: - result[i] = 1 - else: - seen[row] = i - result[i] = 0 - elif keep is False: - for i from 0 <= i < n: - row = values[i] - if row in seen: - result[i] = 1 - result[seen[row]] = 1 - else: - seen[row] = i - result[i] = 0 - else: - raise ValueError('keep must be either "first", "last" or False') - - return result.view(np.bool_) - - @cython.wraparound(False) @cython.boundscheck(False) def unique_label_indices(ndarray[int64_t, ndim=1] labels): diff --git a/pandas/src/hashtable_func_helper.pxi.in b/pandas/src/hashtable_func_helper.pxi.in index f3e16cfd32963..c292256767315 100644 --- a/pandas/src/hashtable_func_helper.pxi.in +++ b/pandas/src/hashtable_func_helper.pxi.in @@ -10,105 +10,272 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in {{py: -# name -dtypes = ['float64', 'int64', 'uint64'] +# dtype, ttype +dtypes = [('float64', 'float64'), + ('uint64', 'uint64'), + ('object', 'pymap'), + ('int64', 'int64')] }} -{{for dtype in dtypes}} +{{for dtype, ttype in dtypes}} @cython.wraparound(False) 
@cython.boundscheck(False) +{{if dtype == 'object'}} +cdef build_count_table_{{dtype}}(ndarray[{{dtype}}] values, + kh_{{ttype}}_t *table, bint dropna): +{{else}} cdef build_count_table_{{dtype}}({{dtype}}_t[:] values, - kh_{{dtype}}_t *table, bint dropna): + kh_{{ttype}}_t *table, bint dropna): +{{endif}} cdef: khiter_t k Py_ssize_t i, n = len(values) + + {{if dtype != 'object'}} {{dtype}}_t val + {{endif}} + int ret = 0 + {{if dtype == 'object'}} + kh_resize_{{ttype}}(table, n // 10) + + for i in range(n): + val = values[i] + + if not checknull(val) or not dropna: + k = kh_get_{{ttype}}(table, <PyObject*> val) + if k != table.n_buckets: + table.vals[k] += 1 + else: + k = kh_put_{{ttype}}(table, <PyObject*> val, &ret) + table.vals[k] = 1 + {{else}} with nogil: - kh_resize_{{dtype}}(table, n) + kh_resize_{{ttype}}(table, n) for i in range(n): val = values[i] if val == val or not dropna: - k = kh_get_{{dtype}}(table, val) + k = kh_get_{{ttype}}(table, val) if k != table.n_buckets: table.vals[k] += 1 else: - k = kh_put_{{dtype}}(table, val, &ret) + k = kh_put_{{ttype}}(table, val, &ret) table.vals[k] = 1 + {{endif}} @cython.wraparound(False) @cython.boundscheck(False) +{{if dtype == 'object'}} +cpdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna): +{{else}} cpdef value_count_{{dtype}}({{dtype}}_t[:] values, bint dropna): +{{endif}} cdef: Py_ssize_t i=0 - kh_{{dtype}}_t *table + kh_{{ttype}}_t *table + + {{if dtype != 'object'}} {{dtype}}_t[:] result_keys int64_t[:] result_counts + {{endif}} + int k - table = kh_init_{{dtype}}() + table = kh_init_{{ttype}}() + {{if dtype == 'object'}} + build_count_table_{{dtype}}(values, table, 1) + {{else}} build_count_table_{{dtype}}(values, table, dropna) + {{endif}} result_keys = np.empty(table.n_occupied, dtype=np.{{dtype}}) result_counts = np.zeros(table.n_occupied, dtype=np.int64) + {{if dtype == 'object'}} + for k in range(table.n_buckets): + if kh_exist_{{ttype}}(table, k): + result_keys[i] = <{{dtype}}> 
table.keys[k] + result_counts[i] = table.vals[k] + i += 1 + {{else}} with nogil: for k in range(table.n_buckets): - if kh_exist_{{dtype}}(table, k): + if kh_exist_{{ttype}}(table, k): result_keys[i] = table.keys[k] result_counts[i] = table.vals[k] i += 1 - kh_destroy_{{dtype}}(table) + {{endif}} + kh_destroy_{{ttype}}(table) + + {{if dtype == 'object'}} + return result_keys, result_counts + {{else}} return np.asarray(result_keys), np.asarray(result_counts) + {{endif}} @cython.wraparound(False) @cython.boundscheck(False) -def duplicated_{{dtype}}({{dtype}}_t[:] values, - object keep='first'): +{{if dtype == 'object'}} + + +def duplicated_{{dtype}}(ndarray[{{dtype}}] values, object keep='first'): +{{else}} + + +def duplicated_{{dtype}}({{dtype}}_t[:] values, object keep='first'): +{{endif}} cdef: int ret = 0, k + {{if dtype != 'object'}} {{dtype}}_t value + {{endif}} Py_ssize_t i, n = len(values) - kh_{{dtype}}_t * table = kh_init_{{dtype}}() + kh_{{ttype}}_t * table = kh_init_{{ttype}}() ndarray[uint8_t, ndim=1, cast=True] out = np.empty(n, dtype='bool') - kh_resize_{{dtype}}(table, min(n, _SIZE_HINT_LIMIT)) + kh_resize_{{ttype}}(table, min(n, _SIZE_HINT_LIMIT)) if keep not in ('last', 'first', False): raise ValueError('keep must be either "first", "last" or False') if keep == 'last': + {{if dtype == 'object'}} + for i from n > i >= 0: + kh_put_{{ttype}}(table, <PyObject*> values[i], &ret) + out[i] = ret == 0 + {{else}} with nogil: - for i from n > i >=0: - kh_put_{{dtype}}(table, values[i], &ret) + for i from n > i >= 0: + kh_put_{{ttype}}(table, values[i], &ret) out[i] = ret == 0 + {{endif}} elif keep == 'first': + {{if dtype == 'object'}} + for i from 0 <= i < n: + kh_put_{{ttype}}(table, <PyObject*> values[i], &ret) + out[i] = ret == 0 + {{else}} with nogil: for i from 0 <= i < n: - kh_put_{{dtype}}(table, values[i], &ret) + kh_put_{{ttype}}(table, values[i], &ret) out[i] = ret == 0 + {{endif}} else: + {{if dtype == 'object'}} + for i from 0 <= i < n: + value = 
values[i] + k = kh_get_{{ttype}}(table, <PyObject*> value) + if k != table.n_buckets: + out[table.vals[k]] = 1 + out[i] = 1 + else: + k = kh_put_{{ttype}}(table, <PyObject*> value, &ret) + table.keys[k] = <PyObject*> value + table.vals[k] = i + out[i] = 0 + {{else}} with nogil: for i from 0 <= i < n: value = values[i] - k = kh_get_{{dtype}}(table, value) + k = kh_get_{{ttype}}(table, value) if k != table.n_buckets: out[table.vals[k]] = 1 out[i] = 1 else: - k = kh_put_{{dtype}}(table, value, &ret) + k = kh_put_{{ttype}}(table, value, &ret) table.keys[k] = value table.vals[k] = i out[i] = 0 - kh_destroy_{{dtype}}(table) + {{endif}} + kh_destroy_{{ttype}}(table) return out {{endfor}} + +#---------------------------------------------------------------------- +# Mode Computations +#---------------------------------------------------------------------- + +{{py: + +# dtype, ctype, table_type, npy_dtype +dtypes = [('int64', 'int64_t', 'int64', 'int64'), + ('uint64', 'uint64_t', 'uint64', 'uint64'), + ('object', 'object', 'pymap', 'object_')] +}} + +{{for dtype, ctype, table_type, npy_dtype in dtypes}} + + +@cython.wraparound(False) +@cython.boundscheck(False) + +{{if dtype == 'object'}} + + +def mode_{{dtype}}(ndarray[{{ctype}}] values): +{{else}} + + +def mode_{{dtype}}({{ctype}}[:] values): +{{endif}} + cdef: + int count, max_count = 2 + int j = -1 # so you can do += + int k + kh_{{table_type}}_t *table + ndarray[{{ctype}}] modes + + table = kh_init_{{table_type}}() + {{if dtype == 'object'}} + build_count_table_{{dtype}}(values, table, 1) + {{else}} + build_count_table_{{dtype}}(values, table, 0) + {{endif}} + + modes = np.empty(table.n_buckets, dtype=np.{{npy_dtype}}) + + {{if dtype != 'object'}} + with nogil: + for k in range(table.n_buckets): + if kh_exist_{{table_type}}(table, k): + count = table.vals[k] + + if count == max_count: + j += 1 + elif count > max_count: + max_count = count + j = 0 + else: + continue + + modes[j] = table.keys[k] + {{else}} + for k in 
range(table.n_buckets): + if kh_exist_{{table_type}}(table, k): + count = table.vals[k] + + if count == max_count: + j += 1 + elif count > max_count: + max_count = count + j = 0 + else: + continue + + modes[j] = <object> table.keys[k] + {{endif}} + + kh_destroy_{{table_type}}(table) + + return modes[:j + 1] + +{{endfor}} diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index d4c209d4532e4..3896e255f0c2f 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -10,8 +10,8 @@ import numpy as np import pandas as pd -from pandas import (Series, DataFrame, isnull, notnull, bdate_range, - date_range, _np_version_under1p10) +from pandas import (Series, Categorical, DataFrame, isnull, notnull, + bdate_range, date_range, _np_version_under1p10) from pandas.core.index import MultiIndex from pandas.tseries.index import Timestamp from pandas.tseries.tdi import Timedelta @@ -128,45 +128,99 @@ def test_median(self): self.assertAlmostEqual(np.median(int_ts), int_ts.median()) def test_mode(self): - s = Series([12, 12, 11, 10, 19, 11]) - exp = Series([11, 12]) - assert_series_equal(s.mode(), exp) - - assert_series_equal( - Series([1, 2, 3]).mode(), Series( - [], dtype='int64')) - - lst = [5] * 20 + [1] * 10 + [6] * 25 - np.random.shuffle(lst) - s = Series(lst) - assert_series_equal(s.mode(), Series([6])) - - s = Series([5] * 10) - assert_series_equal(s.mode(), Series([5])) - - s = Series(lst) - s[0] = np.nan - assert_series_equal(s.mode(), Series([6.])) - - s = Series(list('adfasbasfwewefwefweeeeasdfasnbam')) - assert_series_equal(s.mode(), Series(['e'])) - - s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]') - assert_series_equal(s.mode(), Series([], dtype="M8[ns]")) - s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03', - '2013-01-02'], dtype='M8[ns]') - assert_series_equal(s.mode(), Series(['2011-01-03', '2013-01-02'], - dtype='M8[ns]')) - - # GH 5986 - s = 
Series(['1 days', '-1 days', '0 days'], dtype='timedelta64[ns]') - assert_series_equal(s.mode(), Series([], dtype='timedelta64[ns]')) + # No mode should be found. + exp = Series([], dtype=np.float64) + tm.assert_series_equal(Series([]).mode(), exp) + + exp = Series([], dtype=np.int64) + tm.assert_series_equal(Series([1]).mode(), exp) + + exp = Series([], dtype=np.object) + tm.assert_series_equal(Series(['a', 'b', 'c']).mode(), exp) + + # Test numerical data types. + exp_single = [1] + data_single = [1] * 5 + [2] * 3 + + exp_multi = [1, 3] + data_multi = [1] * 5 + [2] * 3 + [3] * 5 + + for dt in np.typecodes['AllInteger'] + np.typecodes['Float']: + s = Series(data_single, dtype=dt) + exp = Series(exp_single, dtype=dt) + tm.assert_series_equal(s.mode(), exp) + + s = Series(data_multi, dtype=dt) + exp = Series(exp_multi, dtype=dt) + tm.assert_series_equal(s.mode(), exp) + + # Test string and object types. + exp = ['b'] + data = ['a'] * 2 + ['b'] * 3 + + s = Series(data, dtype='c') + exp = Series(exp, dtype='c') + tm.assert_series_equal(s.mode(), exp) + + exp = ['bar'] + data = ['foo'] * 2 + ['bar'] * 3 + + for dt in [str, object]: + s = Series(data, dtype=dt) + exp = Series(exp, dtype=dt) + tm.assert_series_equal(s.mode(), exp) + + # Test datetime types. + exp = Series([], dtype="M8[ns]") + s = Series(['2011-01-03', '2013-01-02', + '1900-05-03'], dtype='M8[ns]') + tm.assert_series_equal(s.mode(), exp) + + exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]') + s = Series(['2011-01-03', '2013-01-02', '1900-05-03', + '2011-01-03', '2013-01-02'], dtype='M8[ns]') + tm.assert_series_equal(s.mode(), exp) + + # gh-5986: Test timedelta types. 
+ exp = Series([], dtype='timedelta64[ns]') + s = Series(['1 days', '-1 days', '0 days'], + dtype='timedelta64[ns]') + tm.assert_series_equal(s.mode(), exp) + exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]') s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min', - '2 min', '2 min'], - dtype='timedelta64[ns]') - assert_series_equal(s.mode(), Series(['2 min', '1 day'], - dtype='timedelta64[ns]')) + '2 min', '2 min'], dtype='timedelta64[ns]') + tm.assert_series_equal(s.mode(), exp) + + # Test mixed dtype. + exp = Series(['foo']) + s = Series([1, 'foo', 'foo']) + tm.assert_series_equal(s.mode(), exp) + + # Test for uint64 overflow. + exp = Series([2**63], dtype=np.uint64) + s = Series([1, 2**63, 2**63], dtype=np.uint64) + tm.assert_series_equal(s.mode(), exp) + + exp = Series([], dtype=np.uint64) + s = Series([1, 2**63], dtype=np.uint64) + tm.assert_series_equal(s.mode(), exp) + + # Test category dtype. + c = Categorical([1, 2]) + exp = Categorical([], categories=[1, 2]) + exp = Series(exp, dtype='category') + tm.assert_series_equal(Series(c).mode(), exp) + + c = Categorical([1, 'a', 'a']) + exp = Categorical(['a'], categories=[1, 'a']) + exp = Series(exp, dtype='category') + tm.assert_series_equal(Series(c).mode(), exp) + + c = Categorical([1, 1, 2, 3, 3]) + exp = Categorical([1, 3], categories=[1, 2, 3]) + exp = Series(exp, dtype='category') + tm.assert_series_equal(Series(c).mode(), exp) def test_prod(self): self._check_stat_op('prod', np.prod) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 7f1745edbb816..e360089928000 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -365,6 +365,11 @@ def test_timedelta64_dtype_array_returned(self): tm.assert_numpy_array_equal(result, expected) self.assertEqual(result.dtype, expected.dtype) + def test_uint64_overflow(self): + s = pd.Series([1, 2, 2**63, 2**63], dtype=np.uint64) + exp = np.array([1, 2, 2**63], dtype=np.uint64) + 
tm.assert_numpy_array_equal(algos.unique(s), exp) + class TestIsin(tm.TestCase): _multiprocess_can_split_ = True @@ -672,7 +677,9 @@ def test_numeric_object_likes(self): np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j, 2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]), np.array(['a', 'b', 'a', 'e', 'c', - 'b', 'd', 'a', 'e', 'f'], dtype=object)] + 'b', 'd', 'a', 'e', 'f'], dtype=object), + np.array([1, 2**63, 1, 3**5, 10, + 2**63, 39, 1, 3**5, 7], dtype=np.uint64)] exp_first = np.array([False, False, True, False, False, True, False, True, True, False]) @@ -1202,6 +1209,118 @@ def test_int64_add_overflow(): b_mask=np.array([False, True])) +class TestMode(tm.TestCase): + + def test_no_mode(self): + exp = Series([], dtype=np.float64) + tm.assert_series_equal(algos.mode([]), exp) + + exp = Series([], dtype=np.int) + tm.assert_series_equal(algos.mode([1]), exp) + + exp = Series([], dtype=np.object) + tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp) + + def test_number_mode(self): + exp_single = [1] + data_single = [1] * 5 + [2] * 3 + + exp_multi = [1, 3] + data_multi = [1] * 5 + [2] * 3 + [3] * 5 + + for dt in np.typecodes['AllInteger'] + np.typecodes['Float']: + s = Series(data_single, dtype=dt) + exp = Series(exp_single, dtype=dt) + tm.assert_series_equal(algos.mode(s), exp) + + s = Series(data_multi, dtype=dt) + exp = Series(exp_multi, dtype=dt) + tm.assert_series_equal(algos.mode(s), exp) + + def test_strobj_mode(self): + exp = ['b'] + data = ['a'] * 2 + ['b'] * 3 + + s = Series(data, dtype='c') + exp = Series(exp, dtype='c') + tm.assert_series_equal(algos.mode(s), exp) + + exp = ['bar'] + data = ['foo'] * 2 + ['bar'] * 3 + + for dt in [str, object]: + s = Series(data, dtype=dt) + exp = Series(exp, dtype=dt) + tm.assert_series_equal(algos.mode(s), exp) + + def test_datelike_mode(self): + exp = Series([], dtype="M8[ns]") + s = Series(['2011-01-03', '2013-01-02', + '1900-05-03'], dtype='M8[ns]') + tm.assert_series_equal(algos.mode(s), exp) + + exp = 
Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]') + s = Series(['2011-01-03', '2013-01-02', '1900-05-03', + '2011-01-03', '2013-01-02'], dtype='M8[ns]') + tm.assert_series_equal(algos.mode(s), exp) + + def test_timedelta_mode(self): + exp = Series([], dtype='timedelta64[ns]') + s = Series(['1 days', '-1 days', '0 days'], + dtype='timedelta64[ns]') + tm.assert_series_equal(algos.mode(s), exp) + + exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]') + s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min', + '2 min', '2 min'], dtype='timedelta64[ns]') + tm.assert_series_equal(algos.mode(s), exp) + + def test_mixed_dtype(self): + exp = Series(['foo']) + s = Series([1, 'foo', 'foo']) + tm.assert_series_equal(algos.mode(s), exp) + + def test_uint64_overflow(self): + exp = Series([2**63], dtype=np.uint64) + s = Series([1, 2**63, 2**63], dtype=np.uint64) + tm.assert_series_equal(algos.mode(s), exp) + + exp = Series([], dtype=np.uint64) + s = Series([1, 2**63], dtype=np.uint64) + tm.assert_series_equal(algos.mode(s), exp) + + def test_categorical(self): + c = Categorical([1, 2]) + exp = Series([], dtype=np.int64) + tm.assert_series_equal(algos.mode(c), exp) + + c = Categorical([1, 'a', 'a']) + exp = Series(['a'], dtype=object) + tm.assert_series_equal(algos.mode(c), exp) + + c = Categorical([1, 1, 2, 3, 3]) + exp = Series([1, 3], dtype=np.int64) + tm.assert_series_equal(algos.mode(c), exp) + + def test_index(self): + idx = Index([1, 2, 3]) + exp = Series([], dtype=np.int64) + tm.assert_series_equal(algos.mode(idx), exp) + + idx = Index([1, 'a', 'a']) + exp = Series(['a'], dtype=object) + tm.assert_series_equal(algos.mode(idx), exp) + + idx = Index([1, 1, 2, 3, 3]) + exp = Series([1, 3], dtype=np.int64) + tm.assert_series_equal(algos.mode(idx), exp) + + exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]') + idx = Index(['1 day', '1 day', '-1 day', '-1 day 2 min', + '2 min', '2 min'], dtype='timedelta64[ns]') + tm.assert_series_equal(algos.mode(idx), exp) + + if 
__name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/types/api.py b/pandas/types/api.py index 096dc2f84aa67..c809cb3614a8c 100644 --- a/pandas/types/api.py +++ b/pandas/types/api.py @@ -44,6 +44,8 @@ is_floating_dtype, is_bool_dtype, is_complex_dtype, + is_signed_integer_dtype, + is_unsigned_integer_dtype, # like is_re, diff --git a/pandas/types/common.py b/pandas/types/common.py index 06c8ef6e35cd7..96eb6d6968bfb 100644 --- a/pandas/types/common.py +++ b/pandas/types/common.py @@ -155,6 +155,18 @@ def is_integer_dtype(arr_or_dtype): not issubclass(tipo, (np.datetime64, np.timedelta64))) +def is_signed_integer_dtype(arr_or_dtype): + tipo = _get_dtype_type(arr_or_dtype) + return (issubclass(tipo, np.signedinteger) and + not issubclass(tipo, (np.datetime64, np.timedelta64))) + + +def is_unsigned_integer_dtype(arr_or_dtype): + tipo = _get_dtype_type(arr_or_dtype) + return (issubclass(tipo, np.unsignedinteger) and + not issubclass(tipo, (np.datetime64, np.timedelta64))) + + def is_int64_dtype(arr_or_dtype): tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.int64)
First of a series of PR's to patch and test `uint64` behaviour in `core/algorithms.py`. In this PR, the following functions are checked: 1. `duplicated()` : robust but now has test to confirm 2. `mode()` : robust but now has test to confirm 3. `unique()` : non-robust but patched and tested
https://api.github.com/repos/pandas-dev/pandas/pulls/14934
2016-12-20T22:40:55Z
2016-12-23T11:15:24Z
null
2016-12-23T13:46:27Z
PERF: fix getitem unique_check / initialization issue
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index 8cbf5b8d97b70..adbe73aa5c5ef 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -68,6 +68,8 @@ class Iteration(object): def setup(self): self.df = DataFrame(randn(10000, 1000)) self.df2 = DataFrame(np.random.randn(50000, 10)) + self.df3 = pd.DataFrame(np.random.randn(1000,5000), + columns=['C'+str(c) for c in range(5000)]) def f(self): if hasattr(self.df, '_item_cache'): @@ -85,6 +87,11 @@ def time_iteritems(self): def time_iteritems_cached(self): self.g() + def time_iteritems_indexing(self): + df = self.df3 + for col in df: + df[col] + def time_itertuples(self): for row in self.df2.itertuples(): pass diff --git a/doc/source/whatsnew/v0.19.2.txt b/doc/source/whatsnew/v0.19.2.txt index 72dbca223ef71..ef40faf5457e7 100644 --- a/doc/source/whatsnew/v0.19.2.txt +++ b/doc/source/whatsnew/v0.19.2.txt @@ -23,6 +23,7 @@ Performance Improvements - Improved performance of ``.replace()`` (:issue:`12745`) - Improved performance of ``PeriodIndex`` (:issue:`14822`) +- Performance regression in indexing with getitem (:issue:`14930`) - Improved performance ``Series`` creation with a datetime index and dictionary data (:issue:`14894`) .. 
_whatsnew_0192.enhancements.other: diff --git a/pandas/index.pyx b/pandas/index.pyx index a6eb74727a999..a245e85d80f96 100644 --- a/pandas/index.pyx +++ b/pandas/index.pyx @@ -82,20 +82,13 @@ cdef class IndexEngine: cdef: bint unique, monotonic_inc, monotonic_dec - bint initialized, monotonic_check, unique_check + bint need_monotonic_check, need_unique_check def __init__(self, vgetter, n): self.vgetter = vgetter self.over_size_threshold = n >= _SIZE_CUTOFF - - self.initialized = 0 - self.monotonic_check = 0 - self.unique_check = 0 - - self.unique = 0 - self.monotonic_inc = 0 - self.monotonic_dec = 0 + self.clear_mapping() def __contains__(self, object val): self._ensure_mapping_populated() @@ -213,16 +206,20 @@ cdef class IndexEngine: property is_unique: def __get__(self): - if not self.initialized: - self.initialize() + if self.need_unique_check: + self._do_unique_check() - self.unique_check = 1 return self.unique == 1 + cdef inline _do_unique_check(self): + + # this de-facto the same + self._ensure_mapping_populated() + property is_monotonic_increasing: def __get__(self): - if not self.monotonic_check: + if self.need_monotonic_check: self._do_monotonic_check() return self.monotonic_inc == 1 @@ -230,7 +227,7 @@ cdef class IndexEngine: property is_monotonic_decreasing: def __get__(self): - if not self.monotonic_check: + if self.need_monotonic_check: self._do_monotonic_check() return self.monotonic_dec == 1 @@ -246,13 +243,12 @@ cdef class IndexEngine: self.monotonic_dec = 0 is_unique = 0 - self.monotonic_check = 1 + self.need_monotonic_check = 0 # we can only be sure of uniqueness if is_unique=1 if is_unique: - self.initialized = 1 self.unique = 1 - self.unique_check = 1 + self.need_unique_check = 0 cdef _get_index_values(self): return self.vgetter() @@ -266,30 +262,32 @@ cdef class IndexEngine: cdef _check_type(self, object val): hash(val) + property is_mapping_populated: + + def __get__(self): + return self.mapping is not None + cdef inline 
_ensure_mapping_populated(self): - # need to reset if we have previously - # set the initialized from monotonic checks - if self.unique_check: - self.initialized = 0 - if not self.initialized: - self.initialize() - - cdef initialize(self): - values = self._get_index_values() + # this populates the mapping + # if its not already populated + # also satisfies the need_unique_check - self.mapping = self._make_hash_table(len(values)) - self.mapping.map_locations(values) + if not self.is_mapping_populated: - if len(self.mapping) == len(values): - self.unique = 1 + values = self._get_index_values() + + self.mapping = self._make_hash_table(len(values)) + self.mapping.map_locations(values) + + if len(self.mapping) == len(values): + self.unique = 1 - self.initialized = 1 + self.need_unique_check = 0 def clear_mapping(self): self.mapping = None - self.initialized = 0 - self.monotonic_check = 0 - self.unique_check = 0 + self.need_monotonic_check = 1 + self.need_unique_check = 1 self.unique = 0 self.monotonic_inc = 0
closes #14930
https://api.github.com/repos/pandas-dev/pandas/pulls/14933
2016-12-20T21:19:03Z
2016-12-21T11:09:55Z
null
2016-12-21T11:09:55Z
PERF: PeriodIndex speed up
diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py index ff5a201057bcd..f9837191a7bae 100644 --- a/asv_bench/benchmarks/period.py +++ b/asv_bench/benchmarks/period.py @@ -49,3 +49,28 @@ def time_value_counts_pindex(self): self.i.value_counts() +class period_standard_indexing(object): + goal_time = 0.2 + + def setup(self): + self.index = PeriodIndex(start='1985', periods=1000, freq='D') + self.series = Series(range(1000), index=self.index) + self.period = self.index[500] + + def time_get_loc(self): + self.index.get_loc(self.period) + + def time_shape(self): + self.index.shape + + def time_shallow_copy(self): + self.index._shallow_copy() + + def time_series_loc(self): + self.series.loc[self.period] + + def time_align(self): + pd.DataFrame({'a': self.series, 'b': self.series[:500]}) + + def time_intersection(self): + self.index[:750].intersection(self.index[250:]) diff --git a/doc/source/whatsnew/v0.19.2.txt b/doc/source/whatsnew/v0.19.2.txt index c94e08ec41760..72dbca223ef71 100644 --- a/doc/source/whatsnew/v0.19.2.txt +++ b/doc/source/whatsnew/v0.19.2.txt @@ -22,6 +22,7 @@ Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Improved performance of ``.replace()`` (:issue:`12745`) +- Improved performance of ``PeriodIndex`` (:issue:`14822`) - Improved performance ``Series`` creation with a datetime index and dictionary data (:issue:`14894`) .. 
_whatsnew_0192.enhancements.other: diff --git a/pandas/core/base.py b/pandas/core/base.py index a0365ce484a5a..49e43a60403ca 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -814,7 +814,7 @@ def transpose(self, *args, **kwargs): @property def shape(self): """ return a tuple of the shape of the underlying data """ - return self.values.shape + return self._values.shape @property def ndim(self): @@ -842,22 +842,22 @@ def data(self): @property def itemsize(self): """ return the size of the dtype of the item of the underlying data """ - return self.values.itemsize + return self._values.itemsize @property def nbytes(self): """ return the number of bytes in the underlying data """ - return self.values.nbytes + return self._values.nbytes @property def strides(self): """ return the strides of the underlying data """ - return self.values.strides + return self._values.strides @property def size(self): """ return the number of elements in the underlying data """ - return self.values.size + return self._values.size @property def flags(self): diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 4bab3bc14461e..8c75195b25ef5 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -65,6 +65,7 @@ def dt64arr_to_periodarr(data, freq, tz): # --- Period index sketch + _DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX @@ -305,7 +306,7 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs): if (len(values) > 0 and is_float_dtype(values)): raise TypeError("PeriodIndex can't take floats") else: - return PeriodIndex(values, name=name, freq=freq, **kwargs) + return cls(values, name=name, freq=freq, **kwargs) values = np.array(values, dtype='int64', copy=False) @@ -326,6 +327,8 @@ def _shallow_copy(self, values=None, **kwargs): if kwargs.get('freq') is None: # freq must be provided kwargs['freq'] = self.freq + if values is None: + values = self._values return super(PeriodIndex, self)._shallow_copy(values=values, **kwargs) def 
_coerce_scalar_to_index(self, item): @@ -356,9 +359,8 @@ def __contains__(self, key): def asi8(self): return self._values.view('i8') - @property + @cache_readonly def _int64index(self): - # do not cache, same as .asi8 return Int64Index(self.asi8, name=self.name, fastpath=True) @property diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index dae1554c0930e..ad4f669fceb42 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -2101,14 +2101,6 @@ def test_comp_period(self): exp = idx.values < idx.values[10] self.assert_numpy_array_equal(result, exp) - def test_getitem_ndim2(self): - idx = period_range('2007-01', periods=3, freq='M') - - result = idx[:, None] - # MPL kludge, internally has incorrect shape - tm.assertIsInstance(result, PeriodIndex) - self.assertEqual(result.shape, (len(idx), )) - def test_getitem_index(self): idx = period_range('2007-01', periods=10, freq='M', name='x')
- [x] closes #https://github.com/pandas-dev/pandas/issues/14822 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14931
2016-12-20T18:46:30Z
2016-12-21T01:58:20Z
2016-12-21T01:58:20Z
2016-12-21T03:28:33Z
BUG: bug in Series construction from UTC
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index dc06fbd159457..d42e8edc6dfe0 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -253,7 +253,7 @@ Bug Fixes - +- Bug in ``Series`` construction with a datetimetz (:issue:`14928`) diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 80de3cd85d4db..396b0e048bc49 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -545,9 +545,9 @@ def _offset(lvalues, rvalues): # with tz, convert to UTC if self.is_datetime64tz_lhs: - lvalues = lvalues.tz_localize(None) + lvalues = lvalues.tz_convert('UTC').tz_localize(None) if self.is_datetime64tz_rhs: - rvalues = rvalues.tz_localize(None) + rvalues = rvalues.tz_convert('UTC').tz_localize(None) lvalues = lvalues.view(np.int64) rvalues = rvalues.view(np.int64) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index ed7b0fda19cb7..a7e3ebdfc43d0 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -529,6 +529,21 @@ def test_constructor_with_datetime_tz(self): expected = Series(pd.DatetimeIndex(['NaT', 'NaT'], tz='US/Eastern')) assert_series_equal(s, expected) + def test_construction_consistency(self): + + # make sure that we are not re-localizing upon construction + # GH 14928 + s = Series(pd.date_range('20130101', periods=3, tz='US/Eastern')) + + result = Series(s, dtype=s.dtype) + tm.assert_series_equal(result, s) + + result = Series(s.dt.tz_convert('UTC'), dtype=s.dtype) + tm.assert_series_equal(result, s) + + result = Series(s.values, dtype=s.dtype) + tm.assert_series_equal(result, s) + def test_constructor_periodindex(self): # GH7932 # converting a PeriodIndex when put in a Series diff --git a/pandas/types/cast.py b/pandas/types/cast.py index 4f4f95d5a455b..ff4fb73d6a9b6 100644 --- a/pandas/types/cast.py +++ b/pandas/types/cast.py @@ -823,9 +823,10 @@ def _possibly_cast_to_datetime(value, dtype, 
errors='raise'): elif is_datetime64tz: # input has to be UTC at this point, so just # localize - value = to_datetime( - value, - errors=errors).tz_localize(dtype.tz) + value = (to_datetime(value, errors=errors) + .tz_localize('UTC') + .tz_convert(dtype.tz) + ) elif is_timedelta64: value = to_timedelta(value, errors=errors)._values except (AttributeError, ValueError, TypeError):
xref #14918
https://api.github.com/repos/pandas-dev/pandas/pulls/14928
2016-12-20T16:44:32Z
2016-12-20T17:48:14Z
null
2016-12-20T17:48:14Z
TST: Groupby.filter dropna=False with empty group (#10780)
diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py index 81bf977e924d8..fb0f52886ec31 100644 --- a/pandas/tests/groupby/test_filters.py +++ b/pandas/tests/groupby/test_filters.py @@ -596,6 +596,19 @@ def test_filter_non_bool_raises(self): with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'): df.groupby('a').filter(lambda g: g.c.mean()) + def test_filter_dropna_with_empty_groups(self): + # GH 10780 + data = pd.Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3)) + groupped = data.groupby(level=0) + result_false = groupped.filter(lambda x: x.mean() > 1, dropna=False) + expected_false = pd.Series([np.nan] * 9, + index=np.repeat([1, 2, 3], 3)) + tm.assert_series_equal(result_false, expected_false) + + result_true = groupped.filter(lambda x: x.mean() > 1, dropna=True) + expected_true = pd.Series(index=pd.Index([], dtype=int)) + tm.assert_series_equal(result_true, expected_true) + def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all()
- [x] closes #10780 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` Didn't find a PR in 0.20 or 0.19.2 that closed this, but works on master 0.19.1
https://api.github.com/repos/pandas-dev/pandas/pulls/14926
2016-12-20T06:36:18Z
2016-12-20T20:44:26Z
2016-12-20T20:44:26Z
2016-12-21T04:23:14Z
PERF: make all inference routines cpdef bint
diff --git a/pandas/lib.pyx b/pandas/lib.pyx index 548a96780d37a..761969491cfc7 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -257,7 +257,7 @@ cdef double INF = <double> np.inf cdef double NEGINF = -INF -cpdef checknull(object val): +cpdef bint checknull(object val): if util.is_float_object(val) or util.is_complex_object(val): return val != val # and val != INF and val != NEGINF elif util.is_datetime64_object(val): @@ -272,7 +272,7 @@ cpdef checknull(object val): return _checknull(val) -cpdef checknull_old(object val): +cpdef bint checknull_old(object val): if util.is_float_object(val) or util.is_complex_object(val): return val != val or val == INF or val == NEGINF elif util.is_datetime64_object(val): @@ -287,21 +287,21 @@ cpdef checknull_old(object val): return util._checknull(val) -cpdef isposinf_scalar(object val): +cpdef bint isposinf_scalar(object val): if util.is_float_object(val) and val == INF: return True else: return False -cpdef isneginf_scalar(object val): +cpdef bint isneginf_scalar(object val): if util.is_float_object(val) and val == NEGINF: return True else: return False -def isscalar(object val): +cpdef bint isscalar(object val): """ Return True if given value is scalar. 
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index a8b694d7ba008..2e6cfe7c7e9ad 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -14,29 +14,31 @@ from util cimport (UINT8_MAX, UINT16_MAX, UINT32_MAX, UINT64_MAX, # core.common import for fast inference checks -def is_float(object obj): +cpdef bint is_float(object obj): return util.is_float_object(obj) -def is_integer(object obj): +cpdef bint is_integer(object obj): return util.is_integer_object(obj) -def is_bool(object obj): +cpdef bint is_bool(object obj): return util.is_bool_object(obj) -def is_complex(object obj): +cpdef bint is_complex(object obj): return util.is_complex_object(obj) -def is_decimal(object obj): +cpdef bint is_decimal(object obj): return isinstance(obj, Decimal) + cpdef bint is_period(object val): """ Return a boolean if this is a Period object """ return util.is_period_object(val) + _TYPE_MAP = { 'categorical': 'categorical', 'category': 'categorical', @@ -234,7 +236,7 @@ def infer_dtype(object _values): return 'mixed' -def is_possible_datetimelike_array(object arr): +cpdef bint is_possible_datetimelike_array(object arr): # determine if we have a possible datetimelike (or null-like) array cdef: Py_ssize_t i, n = len(arr) @@ -319,7 +321,7 @@ cdef inline bint is_timedelta(object o): return PyDelta_Check(o) or util.is_timedelta64_object(o) -def is_bool_array(ndarray values): +cpdef bint is_bool_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf @@ -340,11 +342,7 @@ def is_bool_array(ndarray values): return False -def is_integer(object o): - return util.is_integer_object(o) - - -def is_integer_array(ndarray values): +cpdef bint is_integer_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf @@ -365,7 +363,7 @@ def is_integer_array(ndarray values): return False -def is_integer_float_array(ndarray values): +cpdef bint is_integer_float_array(ndarray values): cdef: Py_ssize_t i, n = len(values) 
ndarray[object] objbuf @@ -388,7 +386,7 @@ def is_integer_float_array(ndarray values): return False -def is_float_array(ndarray values): +cpdef bint is_float_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf @@ -409,7 +407,7 @@ def is_float_array(ndarray values): return False -def is_string_array(ndarray values): +cpdef bint is_string_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf @@ -431,7 +429,7 @@ def is_string_array(ndarray values): return False -def is_unicode_array(ndarray values): +cpdef bint is_unicode_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf @@ -452,7 +450,7 @@ def is_unicode_array(ndarray values): return False -def is_bytes_array(ndarray values): +cpdef bint is_bytes_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf @@ -473,7 +471,7 @@ def is_bytes_array(ndarray values): return False -def is_datetime_array(ndarray[object] values): +cpdef bint is_datetime_array(ndarray[object] values): cdef Py_ssize_t i, null_count = 0, n = len(values) cdef object v if n == 0: @@ -491,7 +489,7 @@ def is_datetime_array(ndarray[object] values): return null_count != n -def is_datetime64_array(ndarray values): +cpdef bint is_datetime64_array(ndarray values): cdef Py_ssize_t i, null_count = 0, n = len(values) cdef object v if n == 0: @@ -509,7 +507,7 @@ def is_datetime64_array(ndarray values): return null_count != n -cpdef is_datetime_with_singletz_array(ndarray[object] values): +cpdef bint is_datetime_with_singletz_array(ndarray[object] values): """ Check values have the same tzinfo attribute. Doesn't check values are datetime-like types. 
@@ -537,7 +535,7 @@ cpdef is_datetime_with_singletz_array(ndarray[object] values): return True -def is_timedelta_array(ndarray values): +cpdef bint is_timedelta_array(ndarray values): cdef Py_ssize_t i, null_count = 0, n = len(values) cdef object v if n == 0: @@ -553,7 +551,7 @@ def is_timedelta_array(ndarray values): return null_count != n -def is_timedelta64_array(ndarray values): +cpdef bint is_timedelta64_array(ndarray values): cdef Py_ssize_t i, null_count = 0, n = len(values) cdef object v if n == 0: @@ -569,7 +567,7 @@ def is_timedelta64_array(ndarray values): return null_count != n -def is_timedelta_or_timedelta64_array(ndarray values): +cpdef bint is_timedelta_or_timedelta64_array(ndarray values): """ infer with timedeltas and/or nat/none """ cdef Py_ssize_t i, null_count = 0, n = len(values) cdef object v @@ -586,7 +584,7 @@ def is_timedelta_or_timedelta64_array(ndarray values): return null_count != n -def is_date_array(ndarray[object] values): +cpdef bint is_date_array(ndarray[object] values): cdef Py_ssize_t i, n = len(values) if n == 0: return False @@ -596,7 +594,7 @@ def is_date_array(ndarray[object] values): return True -def is_time_array(ndarray[object] values): +cpdef bint is_time_array(ndarray[object] values): cdef Py_ssize_t i, n = len(values) if n == 0: return False @@ -606,7 +604,7 @@ def is_time_array(ndarray[object] values): return True -def is_period_array(ndarray[object] values): +cpdef bint is_period_array(ndarray[object] values): cdef Py_ssize_t i, null_count = 0, n = len(values) cdef object v if n == 0:
going to rebase after #14827
https://api.github.com/repos/pandas-dev/pandas/pulls/14925
2016-12-19T23:33:29Z
2016-12-20T14:11:33Z
null
2016-12-20T14:11:33Z
MAINT: Only output errors in C style check
diff --git a/ci/lint.sh b/ci/lint.sh index d7df6215450b4..32ac606a4d30a 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -7,6 +7,8 @@ source activate pandas RET=0 if [ "$LINT" ]; then + pip install cpplint + # pandas/rpy is deprecated and will be removed. # pandas/src is C code, so no need to search there. echo "Linting *.py" @@ -43,13 +45,11 @@ if [ "$LINT" ]; then # from Cython files nor do we want to lint C files that we didn't modify for # this particular codebase (e.g. src/headers, src/klib, src/msgpack). However, # we can lint all header files since they aren't "generated" like C files are. - pip install cpplint - echo "Linting *.c and *.h" for path in '*.h' 'period_helper.c' 'datetime' 'parser' 'ujson' do echo "linting -> pandas/src/$path" - cpplint --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir --recursive pandas/src/$path + cpplint --quiet --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir --recursive pandas/src/$path if [ $? -ne "0" ]; then RET=1 fi
Title is self-explanatory. Follow-up to #14814.
https://api.github.com/repos/pandas-dev/pandas/pulls/14924
2016-12-19T16:12:12Z
2016-12-19T19:20:52Z
2016-12-19T19:20:52Z
2016-12-19T19:22:21Z
TST: Test empty input for read_csv (#14867)
diff --git a/pandas/io/tests/parser/test_textreader.py b/pandas/io/tests/parser/test_textreader.py index 49b70fc5e8703..98cb09cd85480 100644 --- a/pandas/io/tests/parser/test_textreader.py +++ b/pandas/io/tests/parser/test_textreader.py @@ -392,6 +392,12 @@ def test_empty_field_eof(self): names=list('abcd'), engine='c') assert_frame_equal(df, c) + def test_empty_csv_input(self): + # GH14867 + df = read_csv(StringIO(), chunksize=20, header=None, + names=['a', 'b', 'c']) + self.assertTrue(isinstance(df, TextFileReader)) + def assert_array_dicts_equal(left, right): for k, v in compat.iteritems(left):
- [x ] closes #14867 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` - [ ] whatsnew entry LMK if it would be more suitable in a different test file.
https://api.github.com/repos/pandas-dev/pandas/pulls/14920
2016-12-19T14:25:24Z
2016-12-20T16:03:24Z
2016-12-20T16:03:23Z
2017-01-26T01:19:17Z
CLN: move unique1d to algorithms from nanops
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index b2702ea0acca7..6bcd3776867b6 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -113,6 +113,38 @@ def _unique_generic(values, table_type, type_caster): return type_caster(uniques) +def unique1d(values): + """ + Hash table-based unique + """ + if np.issubdtype(values.dtype, np.floating): + table = htable.Float64HashTable(len(values)) + uniques = np.array(table.unique(_ensure_float64(values)), + dtype=np.float64) + elif np.issubdtype(values.dtype, np.datetime64): + table = htable.Int64HashTable(len(values)) + uniques = table.unique(_ensure_int64(values)) + uniques = uniques.view('M8[ns]') + elif np.issubdtype(values.dtype, np.timedelta64): + table = htable.Int64HashTable(len(values)) + uniques = table.unique(_ensure_int64(values)) + uniques = uniques.view('m8[ns]') + elif np.issubdtype(values.dtype, np.integer): + table = htable.Int64HashTable(len(values)) + uniques = table.unique(_ensure_int64(values)) + else: + + # its cheaper to use a String Hash Table than Object + if lib.infer_dtype(values) in ['string']: + table = htable.StringHashTable(len(values)) + else: + table = htable.PyObjectHashTable(len(values)) + + uniques = table.unique(_ensure_object(values)) + + return uniques + + def isin(comps, values): """ Compute the isin boolean array diff --git a/pandas/core/base.py b/pandas/core/base.py index d412349447794..a0365ce484a5a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -969,7 +969,7 @@ def unique(self): if hasattr(values, 'unique'): result = values.unique() else: - from pandas.core.nanops import unique1d + from pandas.core.algorithms import unique1d result = unique1d(values) return result diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 5124dc44e2fc8..7f2e6093d0f4c 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -25,7 +25,7 @@ is_scalar) from pandas.core.common import is_null_slice -from 
pandas.core.algorithms import factorize, take_1d +from pandas.core.algorithms import factorize, take_1d, unique1d from pandas.core.base import (PandasObject, PandasDelegate, NoNewAttributesMixin, _shared_docs) import pandas.core.common as com @@ -1834,7 +1834,6 @@ def unique(self): unique values : ``Categorical`` """ - from pandas.core.nanops import unique1d # unlike np.unique, unique1d does not sort unique_codes = unique1d(self.codes) cat = self.copy() diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index a76e348b7dee2..1f76bc850cee9 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -9,10 +9,8 @@ except ImportError: # pragma: no cover _USE_BOTTLENECK = False -import pandas.hashtable as _hash from pandas import compat, lib, algos, tslib -from pandas.types.common import (_ensure_int64, _ensure_object, - _ensure_float64, _get_dtype, +from pandas.types.common import (_get_dtype, is_float, is_scalar, is_integer, is_complex, is_float_dtype, is_complex_dtype, is_integer_dtype, @@ -784,28 +782,3 @@ def f(x, y): nanle = make_nancomp(operator.le) naneq = make_nancomp(operator.eq) nanne = make_nancomp(operator.ne) - - -def unique1d(values): - """ - Hash table-based unique - """ - if np.issubdtype(values.dtype, np.floating): - table = _hash.Float64HashTable(len(values)) - uniques = np.array(table.unique(_ensure_float64(values)), - dtype=np.float64) - elif np.issubdtype(values.dtype, np.datetime64): - table = _hash.Int64HashTable(len(values)) - uniques = table.unique(_ensure_int64(values)) - uniques = uniques.view('M8[ns]') - elif np.issubdtype(values.dtype, np.timedelta64): - table = _hash.Int64HashTable(len(values)) - uniques = table.unique(_ensure_int64(values)) - uniques = uniques.view('m8[ns]') - elif np.issubdtype(values.dtype, np.integer): - table = _hash.Int64HashTable(len(values)) - uniques = table.unique(_ensure_int64(values)) - else: - table = _hash.PyObjectHashTable(len(values)) - uniques = table.unique(_ensure_object(values)) - return 
uniques diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index d0c909b9c1b30..92a9184ad30fc 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -277,28 +277,6 @@ def test_factorize_nan(self): self.assertTrue( np.array_equal(pd.isnull(key), expected == na_sentinel)) - def test_vector_resize(self): - # Test for memory errors after internal vector - # reallocations (pull request #7157) - - def _test_vector_resize(htable, uniques, dtype, nvals): - vals = np.array(np.random.randn(1000), dtype=dtype) - # get_labels appends to the vector - htable.get_labels(vals[:nvals], uniques, 0, -1) - # to_array resizes the vector - uniques.to_array() - htable.get_labels(vals, uniques, 0, -1) - - test_cases = [ - (hashtable.PyObjectHashTable, hashtable.ObjectVector, 'object'), - (hashtable.Float64HashTable, hashtable.Float64Vector, 'float64'), - (hashtable.Int64HashTable, hashtable.Int64Vector, 'int64')] - - for (tbl, vect, dtype) in test_cases: - # resizing to empty is a special case - _test_vector_resize(tbl(), vect(), dtype, 0) - _test_vector_resize(tbl(), vect(), dtype, 10) - def test_complex_sorting(self): # gh 12666 - check no segfault # Test not valid numpy versions older than 1.11 @@ -912,6 +890,39 @@ class TestGroupVarFloat32(tm.TestCase, GroupVarTestMixin): rtol = 1e-2 +class TestHashTable(tm.TestCase): + + def test_lookup_nan(self): + xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3]) + m = hashtable.Float64HashTable() + m.map_locations(xs) + self.assert_numpy_array_equal(m.lookup(xs), + np.arange(len(xs), dtype=np.int64)) + + def test_vector_resize(self): + # Test for memory errors after internal vector + # reallocations (pull request #7157) + + def _test_vector_resize(htable, uniques, dtype, nvals): + vals = np.array(np.random.randn(1000), dtype=dtype) + # get_labels appends to the vector + htable.get_labels(vals[:nvals], uniques, 0, -1) + # to_array resizes the vector + uniques.to_array() + htable.get_labels(vals, uniques, 0, 
-1) + + test_cases = [ + (hashtable.PyObjectHashTable, hashtable.ObjectVector, 'object'), + (hashtable.StringHashTable, hashtable.ObjectVector, 'object'), + (hashtable.Float64HashTable, hashtable.Float64Vector, 'float64'), + (hashtable.Int64HashTable, hashtable.Int64Vector, 'int64')] + + for (tbl, vect, dtype) in test_cases: + # resizing to empty is a special case + _test_vector_resize(tbl(), vect(), dtype, 0) + _test_vector_resize(tbl(), vect(), dtype, 10) + + def test_quantile(): s = Series(np.random.randn(100)) diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index a5cd0bbc28369..717eae3e59715 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -1051,17 +1051,6 @@ def test_searchsorted(self): self.assertTrue(0 <= index <= len(o)) -class TestFloat64HashTable(tm.TestCase): - - def test_lookup_nan(self): - from pandas.hashtable import Float64HashTable - xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3]) - m = Float64HashTable() - m.map_locations(xs) - self.assert_numpy_array_equal(m.lookup(xs), - np.arange(len(xs), dtype=np.int64)) - - class TestTranspose(Ops): errmsg = "the 'axes' parameter is not supported" diff --git a/pandas/tseries/util.py b/pandas/tseries/util.py index 59daa8d7780b4..dc460dee8415b 100644 --- a/pandas/tseries/util.py +++ b/pandas/tseries/util.py @@ -4,7 +4,7 @@ import numpy as np from pandas.types.common import _ensure_platform_int from pandas.core.frame import DataFrame -import pandas.core.nanops as nanops +import pandas.core.algorithms as algorithms def pivot_annual(series, freq=None): @@ -45,7 +45,7 @@ def pivot_annual(series, freq=None): index = series.index year = index.year - years = nanops.unique1d(year) + years = algorithms.unique1d(year) if freq is not None: freq = freq.upper()
TST: consolidate hashtable testing to test_algos.py
https://api.github.com/repos/pandas-dev/pandas/pulls/14919
2016-12-19T11:31:18Z
2016-12-19T14:03:00Z
2016-12-19T14:03:00Z
2016-12-19T14:03:00Z
BUG: Properly read Categorical msgpacks
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 46d275df00431..165df1acf0ebf 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -278,6 +278,7 @@ Bug Fixes +- Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`) diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 1838d9175e597..ab44e46c96b77 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -593,17 +593,13 @@ def decode(obj): elif typ == u'series': dtype = dtype_for(obj[u'dtype']) pd_dtype = pandas_dtype(dtype) - np_dtype = pandas_dtype(dtype).base index = obj[u'index'] result = globals()[obj[u'klass']](unconvert(obj[u'data'], dtype, obj[u'compress']), index=index, - dtype=np_dtype, + dtype=pd_dtype, name=obj[u'name']) - tz = getattr(pd_dtype, 'tz', None) - if tz: - result = result.dt.tz_localize('UTC').dt.tz_convert(tz) return result elif typ == u'block_manager': diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py index 91042775ba19d..63c2ffc629ca6 100644 --- a/pandas/io/tests/test_packers.py +++ b/pandas/io/tests/test_packers.py @@ -363,6 +363,8 @@ def setUp(self): 'F': [Timestamp('20130102', tz='US/Eastern')] * 2 + [Timestamp('20130603', tz='CET')] * 3, 'G': [Timestamp('20130102', tz='US/Eastern')] * 5, + 'H': Categorical([1, 2, 3, 4, 5]), + 'I': Categorical([1, 2, 3, 4, 5], ordered=True), } self.d['float'] = Series(data['A']) @@ -370,6 +372,8 @@ def setUp(self): self.d['mixed'] = Series(data['E']) self.d['dt_tz_mixed'] = Series(data['F']) self.d['dt_tz'] = Series(data['G']) + self.d['cat_ordered'] = Series(data['H']) + self.d['cat_unordered'] = Series(data['I']) def test_basic(self):
Patches bug in `read_msgpack` in which `Series` categoricals were accidentally being constructed with a non-categorical dtype, resulting in an error. Closes #14901.
https://api.github.com/repos/pandas-dev/pandas/pulls/14918
2016-12-19T09:20:50Z
2016-12-21T11:28:03Z
2016-12-21T11:28:03Z
2016-12-21T15:46:19Z
BUG: Don't convert uint64 to object in DataFrame init
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 76ba4a5f723fa..2acc1c9d67ec5 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -235,6 +235,7 @@ Bug Fixes ~~~~~~~~~ - Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`) +- Bug in ``DataFrame`` construction in which unsigned 64-bit integer elements were being converted to objects (:issue:`14881`) - Bug in ``astype()`` where ``inf`` values were incorrectly converted to integers. Now raises error now with ``astype()`` for Series and DataFrames (:issue:`14265`) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 120a9cbcd1a75..05ac3356c1770 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -4314,11 +4314,6 @@ def form_blocks(arrays, names, axes): elif is_datetimetz(v): datetime_tz_items.append((i, k, v)) elif issubclass(v.dtype.type, np.integer): - if v.dtype == np.uint64: - # HACK #2355 definite overflow - if (v > 2**63 - 1).any(): - object_items.append((i, k, v)) - continue int_items.append((i, k, v)) elif v.dtype == np.bool_: bool_items.append((i, k, v)) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 489c85a7234b8..bf0fabaf3e402 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -183,13 +183,14 @@ def test_constructor_bool(self): self.assertEqual(df.values.dtype, np.bool_) def test_constructor_overflow_int64(self): + # see gh-14881 values = np.array([2 ** 64 - i for i in range(1, 10)], dtype=np.uint64) result = DataFrame({'a': values}) - self.assertEqual(result['a'].dtype, object) + self.assertEqual(result['a'].dtype, np.uint64) - # #2355 + # see gh-2355 data_scores = [(6311132704823138710, 273), (2685045978526272070, 23), (8921811264899370420, 45), (long(17019687244989530680), 270), @@ -198,7 +199,7 @@ def test_constructor_overflow_int64(self): data = 
np.zeros((len(data_scores),), dtype=dtype) data[:] = data_scores df_crawls = DataFrame(data) - self.assertEqual(df_crawls['uid'].dtype, object) + self.assertEqual(df_crawls['uid'].dtype, np.uint64) def test_constructor_ordereddict(self): import random diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 85aadee8b0900..8462d5cd9bcf6 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -378,10 +378,10 @@ def test_arith_flex_frame(self): result = getattr(self.mixed_int, op)(2 + self.mixed_int) exp = f(self.mixed_int, 2 + self.mixed_int) - # overflow in the uint + # no overflow in the uint dtype = None if op in ['sub']: - dtype = dict(B='object', C=None) + dtype = dict(B='uint64', C=None) elif op in ['add', 'mul']: dtype = dict(C=None) assert_frame_equal(result, exp) @@ -410,10 +410,10 @@ def test_arith_flex_frame(self): 2 + self.mixed_int) exp = f(self.mixed_int, 2 + self.mixed_int) - # overflow in the uint + # no overflow in the uint dtype = None if op in ['sub']: - dtype = dict(B='object', C=None) + dtype = dict(B='uint64', C=None) elif op in ['add', 'mul']: dtype = dict(C=None) assert_frame_equal(result, exp)
The hack used to resolve #2355 is no longer needed. Removes the hack and patches several tests that relied on this hacky (and buggy) behavior. Closes #14881.
https://api.github.com/repos/pandas-dev/pandas/pulls/14917
2016-12-19T07:46:34Z
2016-12-19T16:59:50Z
2016-12-19T16:59:50Z
2016-12-19T18:18:00Z
BUG: Convert uint64 in maybe_convert_objects
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 83a70aa34fccf..0dbd3ac6a2ba7 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -259,3 +259,4 @@ Bug Fixes - Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`) +- Bug in converting object elements of array-like objects to unsigned 64-bit integers (:issue:`4471`) diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index 5ac2c70bb1808..3fe8092c0041c 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -12,6 +12,8 @@ from util cimport (UINT8_MAX, UINT16_MAX, UINT32_MAX, UINT64_MAX, # core.common import for fast inference checks +npy_int64_max = np.iinfo(np.int64).max + def is_float(object obj): return util.is_float_object(obj) @@ -722,6 +724,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, ndarray[float64_t] floats ndarray[complex128_t] complexes ndarray[int64_t] ints + ndarray[uint64_t] uints ndarray[uint8_t] bools ndarray[int64_t] idatetimes ndarray[int64_t] itimedeltas @@ -731,6 +734,8 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, bint seen_datetimetz = 0 bint seen_timedelta = 0 bint seen_int = 0 + bint seen_uint = 0 + bint seen_sint = 0 bint seen_bool = 0 bint seen_object = 0 bint seen_null = 0 @@ -743,6 +748,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, floats = np.empty(n, dtype='f8') complexes = np.empty(n, dtype='c16') ints = np.empty(n, dtype='i8') + uints = np.empty(n, dtype='u8') bools = np.empty(n, dtype=np.uint8) if convert_datetime: @@ -798,11 +804,21 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, floats[i] = <float64_t> val complexes[i] = <double complex> val if not seen_null: - try: - ints[i] = val - except OverflowError: + seen_uint = seen_uint or (val > npy_int64_max) + seen_sint = seen_sint or (val < 0) + + if seen_uint and seen_sint: seen_object = 1 
break + + if seen_uint: + uints[i] = val + elif seen_sint: + ints[i] = val + else: + uints[i] = val + ints[i] = val + elif util.is_complex_object(val): complexes[i] = val seen_complex = 1 @@ -865,7 +881,10 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, elif seen_float: return floats elif seen_int: - return ints + if seen_uint: + return uints + else: + return ints elif (not seen_datetime and not seen_numeric and not seen_timedelta): return bools.view(np.bool_) @@ -896,7 +915,10 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, if not seen_int: return floats elif seen_int: - return ints + if seen_uint: + return uints + else: + return ints elif (not seen_datetime and not seen_numeric and not seen_timedelta): return bools.view(np.bool_) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index e51cc0f5a6ec7..706820b06b12e 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -142,7 +142,7 @@ def test_constructor_with_convert(self): df = DataFrame({'A': [2 ** 63]}) result = df['A'] - expected = Series(np.asarray([2 ** 63], np.object_), name='A') + expected = Series(np.asarray([2 ** 63], np.uint64), name='A') assert_series_equal(result, expected) df = DataFrame({'A': [datetime(2005, 1, 1), True]}) diff --git a/pandas/tests/types/test_inference.py b/pandas/tests/types/test_inference.py index a63ae5f7cf74e..f83ad51c2f648 100644 --- a/pandas/tests/types/test_inference.py +++ b/pandas/tests/types/test_inference.py @@ -254,6 +254,20 @@ def test_convert_non_hashable(self): result = lib.maybe_convert_numeric(arr, set(), False, True) tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan])) + def test_maybe_convert_objects_uint64(self): + # see gh-4471 + arr = np.array([2**63], dtype=object) + exp = np.array([2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp) + + arr = 
np.array([2, -1], dtype=object) + exp = np.array([2, -1], dtype=np.int64) + tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp) + + arr = np.array([2**63, -1], dtype=object) + exp = np.array([2**63, -1], dtype=object) + tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp) + class TestTypeInference(tm.TestCase): _multiprocess_can_split_ = True
Adds handling for `uint64` objects during conversion. When negative numbers and `uint64` are detected, we then convert the result to `object`. Picks up where #4845 left off. Closes #4471.
https://api.github.com/repos/pandas-dev/pandas/pulls/14916
2016-12-19T06:42:33Z
2016-12-20T13:49:05Z
null
2016-12-20T14:39:46Z
BUG: Prevent uint64 overflow in Series.unique
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 83a70aa34fccf..a8421535636f9 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -258,4 +258,6 @@ Bug Fixes + +- Bug in ``Series.unique()`` in which unsigned 64-bit integers were causing overflow (:issue:`14721`) - Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 6bcd3776867b6..e51774ce4d9b4 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -25,6 +25,7 @@ _ensure_platform_int, _ensure_object, _ensure_float64, + _ensure_uint64, _ensure_int64, is_list_like) from pandas.compat.numpy import _np_version_under1p10 @@ -129,9 +130,12 @@ def unique1d(values): table = htable.Int64HashTable(len(values)) uniques = table.unique(_ensure_int64(values)) uniques = uniques.view('m8[ns]') - elif np.issubdtype(values.dtype, np.integer): + elif np.issubdtype(values.dtype, np.signedinteger): table = htable.Int64HashTable(len(values)) uniques = table.unique(_ensure_int64(values)) + elif np.issubdtype(values.dtype, np.unsignedinteger): + table = htable.UInt64HashTable(len(values)) + uniques = table.unique(_ensure_uint64(values)) else: # its cheaper to use a String Hash Table than Object diff --git a/pandas/hashtable.pxd b/pandas/hashtable.pxd index f3ea7ad792160..cd06b938310a8 100644 --- a/pandas/hashtable.pxd +++ b/pandas/hashtable.pxd @@ -1,10 +1,17 @@ -from khash cimport kh_int64_t, kh_float64_t, kh_pymap_t, kh_str_t, int64_t, float64_t +from khash cimport (kh_int64_t, kh_uint64_t, kh_float64_t, kh_pymap_t, + kh_str_t, uint64_t, int64_t, float64_t) # prototypes for sharing cdef class HashTable: pass +cdef class UInt64HashTable(HashTable): + cdef kh_uint64_t *table + + cpdef get_item(self, uint64_t val) + cpdef set_item(self, uint64_t key, Py_ssize_t val) + cdef class Int64HashTable(HashTable): cdef kh_int64_t *table diff 
--git a/pandas/src/algos_common_helper.pxi.in b/pandas/src/algos_common_helper.pxi.in index c52c734f727e9..c1c190704b4c7 100644 --- a/pandas/src/algos_common_helper.pxi.in +++ b/pandas/src/algos_common_helper.pxi.in @@ -553,6 +553,7 @@ dtypes = [('float64', 'FLOAT64', 'float64'), ('int16', 'INT16', 'int16'), ('int32', 'INT32', 'int32'), ('int64', 'INT64', 'int64'), + ('uint64', 'UINT64', 'uint64'), # ('platform_int', 'INT', 'int_'), # ('object', 'OBJECT', 'object_'), ] diff --git a/pandas/src/hashtable_class_helper.pxi.in b/pandas/src/hashtable_class_helper.pxi.in index 22714e6305677..55c840b20c78b 100644 --- a/pandas/src/hashtable_class_helper.pxi.in +++ b/pandas/src/hashtable_class_helper.pxi.in @@ -17,7 +17,8 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in dtypes = [('Float64', 'float64', 'float64_t'), ('Int64', 'int64', 'int64_t'), - ('String', 'string', 'char *')] + ('String', 'string', 'char *'), + ('UInt64', 'uint64', 'uint64_t')] }} {{for name, dtype, arg in dtypes}} @@ -40,6 +41,7 @@ cdef inline void append_data_{{dtype}}({{name}}VectorData *data, ctypedef fused vector_data: Int64VectorData + UInt64VectorData Float64VectorData StringVectorData @@ -54,6 +56,7 @@ cdef inline bint needs_resize(vector_data *data) nogil: # name, dtype, arg, idtype dtypes = [('Float64', 'float64', 'float64_t', 'np.float64'), + ('UInt64', 'uint64', 'uint64_t', 'np.uint64'), ('Int64', 'int64', 'int64_t', 'np.int64')] }} @@ -201,6 +204,7 @@ cdef class HashTable: # name, dtype, null_condition, float_group dtypes = [('Float64', 'float64', 'val != val', True), + ('UInt64', 'uint64', 'val == 0', False), ('Int64', 'int64', 'val == iNaT', False)] }} diff --git a/pandas/src/hashtable_func_helper.pxi.in b/pandas/src/hashtable_func_helper.pxi.in index 1840b914f3328..f3e16cfd32963 100644 --- a/pandas/src/hashtable_func_helper.pxi.in +++ b/pandas/src/hashtable_func_helper.pxi.in @@ -11,7 +11,7 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from 
.pxi.in {{py: # name -dtypes = ['float64', 'int64'] +dtypes = ['float64', 'int64', 'uint64'] }} diff --git a/pandas/src/khash.pxd b/pandas/src/khash.pxd index b28f43eecfac7..adb0fe285dbb8 100644 --- a/pandas/src/khash.pxd +++ b/pandas/src/khash.pxd @@ -1,5 +1,5 @@ from cpython cimport PyObject -from numpy cimport int64_t, int32_t, uint32_t, float64_t +from numpy cimport int64_t, uint64_t, int32_t, uint32_t, float64_t cdef extern from "khash_python.h": ctypedef uint32_t khint_t @@ -55,7 +55,6 @@ cdef extern from "khash_python.h": bint kh_exist_str(kh_str_t*, khiter_t) nogil - ctypedef struct kh_int64_t: khint_t n_buckets, size, n_occupied, upper_bound uint32_t *flags @@ -72,6 +71,24 @@ cdef extern from "khash_python.h": bint kh_exist_int64(kh_int64_t*, khiter_t) nogil + ctypedef uint64_t khuint64_t + + ctypedef struct kh_uint64_t: + khint_t n_buckets, size, n_occupied, upper_bound + uint32_t *flags + khuint64_t *keys + size_t *vals + + inline kh_uint64_t* kh_init_uint64() nogil + inline void kh_destroy_uint64(kh_uint64_t*) nogil + inline void kh_clear_uint64(kh_uint64_t*) nogil + inline khint_t kh_get_uint64(kh_uint64_t*, int64_t) nogil + inline void kh_resize_uint64(kh_uint64_t*, khint_t) nogil + inline khint_t kh_put_uint64(kh_uint64_t*, int64_t, int*) nogil + inline void kh_del_uint64(kh_uint64_t*, khint_t) nogil + + bint kh_exist_uint64(kh_uint64_t*, khiter_t) nogil + ctypedef struct kh_float64_t: khint_t n_buckets, size, n_occupied, upper_bound uint32_t *flags diff --git a/pandas/src/klib/khash.h b/pandas/src/klib/khash.h index dc004a0e1770b..869607a44c001 100644 --- a/pandas/src/klib/khash.h +++ b/pandas/src/klib/khash.h @@ -567,12 +567,14 @@ typedef const char *kh_cstr_t; #define kh_exist_str(h, k) (kh_exist(h, k)) #define kh_exist_float64(h, k) (kh_exist(h, k)) +#define kh_exist_uint64(h, k) (kh_exist(h, k)) #define kh_exist_int64(h, k) (kh_exist(h, k)) #define kh_exist_int32(h, k) (kh_exist(h, k)) KHASH_MAP_INIT_STR(str, size_t) KHASH_MAP_INIT_INT(int32, 
size_t) KHASH_MAP_INIT_INT64(int64, size_t) +KHASH_MAP_INIT_UINT64(uint64, size_t) #endif /* __AC_KHASH_H */ diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 92a9184ad30fc..7f1745edbb816 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -899,6 +899,18 @@ def test_lookup_nan(self): self.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.int64)) + def test_lookup_overflow(self): + xs = np.array([1, 2, 2**63], dtype=np.uint64) + m = hashtable.UInt64HashTable() + m.map_locations(xs) + self.assert_numpy_array_equal(m.lookup(xs), + np.arange(len(xs), dtype=np.int64)) + + def test_get_unique(self): + s = pd.Series([1, 2, 2**63, 2**63], dtype=np.uint64) + exp = np.array([1, 2, 2**63], dtype=np.uint64) + self.assert_numpy_array_equal(s.unique(), exp) + def test_vector_resize(self): # Test for memory errors after internal vector # reallocations (pull request #7157) @@ -915,7 +927,8 @@ def _test_vector_resize(htable, uniques, dtype, nvals): (hashtable.PyObjectHashTable, hashtable.ObjectVector, 'object'), (hashtable.StringHashTable, hashtable.ObjectVector, 'object'), (hashtable.Float64HashTable, hashtable.Float64Vector, 'float64'), - (hashtable.Int64HashTable, hashtable.Int64Vector, 'int64')] + (hashtable.Int64HashTable, hashtable.Int64Vector, 'int64'), + (hashtable.UInt64HashTable, hashtable.UInt64Vector, 'uint64')] for (tbl, vect, dtype) in test_cases: # resizing to empty is a special case diff --git a/pandas/types/common.py b/pandas/types/common.py index a7ba96f95e31b..06c8ef6e35cd7 100644 --- a/pandas/types/common.py +++ b/pandas/types/common.py @@ -32,6 +32,8 @@ def _ensure_float(arr): arr = arr.astype(float) return arr + +_ensure_uint64 = algos.ensure_uint64 _ensure_int64 = algos.ensure_int64 _ensure_int32 = algos.ensure_int32 _ensure_int16 = algos.ensure_int16
Introduces a `UInt64HashTable` class to hash `uint64` elements and prevent overflow in functions like `Series.unique`. Closes #14721.
https://api.github.com/repos/pandas-dev/pandas/pulls/14915
2016-12-19T05:43:48Z
2016-12-20T13:46:00Z
null
2016-12-20T14:39:11Z
BUG: Fixed DataFrame.describe percentiles are ndarray w/ no median
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 76ba4a5f723fa..e84c8beeadcc7 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -236,7 +236,7 @@ Bug Fixes - Bug in ``TimedeltaIndex`` addition where overflow was being allowed without error (:issue:`14816`) - Bug in ``astype()`` where ``inf`` values were incorrectly converted to integers. Now raises error now with ``astype()`` for Series and DataFrames (:issue:`14265`) - +- Bug in ``describe()`` when passing a numpy array which does not contain the median to the ``percentiles`` keyword argument (:issue:`14908`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 48d799811aa94..3678168890444 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5262,6 +5262,9 @@ def describe(self, percentiles=None, include=None, exclude=None): raise ValueError("Cannot describe a DataFrame without columns") if percentiles is not None: + # explicit conversion of `percentiles` to list + percentiles = list(percentiles) + # get them all to be in [0, 1] self._check_percentile(percentiles) diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 84df82db69f77..3500ce913462a 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -996,6 +996,13 @@ def test_describe_percentiles_insert_median(self): self.assertTrue('0%' in d1.index) self.assertTrue('100%' in d2.index) + def test_describe_percentiles_insert_median_ndarray(self): + # GH14908 + df = tm.makeDataFrame() + result = df.describe(percentiles=np.array([.25, .75])) + expected = df.describe(percentiles=[.25, .75]) + assert_frame_equal(result, expected) + def test_describe_percentiles_unique(self): # GH13104 df = tm.makeDataFrame()
Explicit conversion to list for `percentiles`. Fixes the case where `percentiles` is passed as a numpy with no median (0.5) present. Closes #14908.
https://api.github.com/repos/pandas-dev/pandas/pulls/14914
2016-12-19T05:33:48Z
2016-12-19T20:30:21Z
null
2016-12-19T22:04:48Z
TST:Test to_sparse with nan dataframe (#10079)
diff --git a/pandas/sparse/tests/test_frame.py b/pandas/sparse/tests/test_frame.py index 5cc765a2c1cf3..ab12099b5624d 100644 --- a/pandas/sparse/tests/test_frame.py +++ b/pandas/sparse/tests/test_frame.py @@ -215,6 +215,21 @@ def test_constructor_preserve_attr(self): self.assertEqual(df['x'].dtype, np.int64) self.assertEqual(df['x'].fill_value, 0) + def test_constructor_nan_dataframe(self): + # GH 10079 + trains = np.arange(100) + tresholds = [10, 20, 30, 40, 50, 60] + tuples = [(i, j) for i in trains for j in tresholds] + index = pd.MultiIndex.from_tuples(tuples, + names=['trains', 'tresholds']) + matrix = np.empty((len(index), len(trains))) + matrix.fill(np.nan) + df = pd.DataFrame(matrix, index=index, columns=trains, dtype=float) + result = df.to_sparse() + expected = pd.SparseDataFrame(matrix, index=index, columns=trains, + dtype=float) + tm.assert_sp_frame_equal(result, expected) + def test_dtypes(self): df = DataFrame(np.random.randn(10000, 4)) df.ix[:9998] = np.nan
- [x] closes #10079 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` Possibly fixed with a PR that fixed one of #10627. Look like `to_dense` is fixed for 0.20.0, but should be unrelated to this.
https://api.github.com/repos/pandas-dev/pandas/pulls/14913
2016-12-18T23:04:24Z
2016-12-19T11:20:43Z
2016-12-19T11:20:43Z
2016-12-21T04:22:32Z
CLN: Resubmit of GH14700. Fixes GH14554. Errors other than Indexing…
diff --git a/doc/source/whatsnew/v0.19.2.txt b/doc/source/whatsnew/v0.19.2.txt index ea59f3fbf493a..af8f561f39aac 100644 --- a/doc/source/whatsnew/v0.19.2.txt +++ b/doc/source/whatsnew/v0.19.2.txt @@ -96,3 +96,5 @@ Bug Fixes - Bug in ``.plot(kind='kde')`` which did not drop missing values to generate the KDE Plot, instead generating an empty plot. (:issue:`14821`) - Bug in ``unstack()`` if called with a list of column(s) as an argument, regardless of the dtypes of all columns, they get coerced to ``object`` (:issue:`11847`) + +-Bug in indexing that transformed ``RecursionError`` into ``KeyError`` or ``IndexingError`` (:issue:`14554`) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index c4ae3dcca8367..107d68c192ead 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -848,7 +848,7 @@ def _multi_take(self, tup): [(a, self._convert_for_reindex(t, axis=o._get_axis_number(a))) for t, a in zip(tup, o._AXIS_ORDERS)]) return o.reindex(**d) - except: + except(KeyError, IndexingError): raise self._exception def _convert_for_reindex(self, key, axis=0):
…Error and KeyError now bubble up appropriately. - [x] closes #14554 - [ ] tests added / passed (Not required per GH14700 discussion) - [x] passes ``git diff upstream/master | flake8 --diff`` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14912
2016-12-18T21:40:40Z
2016-12-19T23:46:27Z
null
2016-12-19T23:47:01Z
BUG, DOC: Improve dialect handling in read_csv
diff --git a/doc/source/io.rst b/doc/source/io.rst index 8ddf4186eba25..af05a89a54a62 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -325,8 +325,11 @@ encoding : str, default ``None`` Python standard encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_. dialect : str or :class:`python:csv.Dialect` instance, default ``None`` - If ``None`` defaults to Excel dialect. Ignored if sep longer than 1 char. See - :class:`python:csv.Dialect` documentation for more details. + If provided, this parameter will override values (default or not) for the + following parameters: `delimiter`, `doublequote`, `escapechar`, + `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to + override values, a ParserWarning will be issued. See :class:`python:csv.Dialect` + documentation for more details. tupleize_cols : boolean, default ``False`` Leave a list of tuples on columns as is (default is to convert to a MultiIndex on the columns). diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 10684021d5599..095a7a5c13ba3 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -243,6 +243,8 @@ Other API Changes - ``SparseArray.cumsum()`` and ``SparseSeries.cumsum()`` will now always return ``SparseArray`` and ``SparseSeries`` respectively (:issue:`12855`) - ``DataFrame.applymap()`` with an empty ``DataFrame`` will return a copy of the empty ``DataFrame`` instead of a ``Series`` (:issue:`8222`) +- ``pd.read_csv()`` will now issue a ``ParserWarning`` whenever there are conflicting values provided by the ``dialect`` parameter and the user (:issue:`14898`) + .. 
_whatsnew_0200.deprecations: Deprecations @@ -291,6 +293,8 @@ Bug Fixes - Bug in ``describe()`` when passing a numpy array which does not contain the median to the ``percentiles`` keyword argument (:issue:`14908`) - Bug in ``DataFrame.sort_values()`` when sorting by multiple columns where one column is of type ``int64`` and contains ``NaT`` (:issue:`14922`) - Bug in ``DataFrame.reindex()`` in which ``method`` was ignored when passing ``columns`` (:issue:`14992`) +- Bug in ``pd.read_csv()`` in which the ``dialect`` parameter was not being verified before processing (:issue:`14898`) + - Bug in ``pd.read_msgpack()`` in which ``Series`` categoricals were being improperly processed (:issue:`14901`) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 2332a9ade93ff..040ec3d803303 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -244,8 +244,11 @@ standard encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ dialect : str or csv.Dialect instance, default None - If None defaults to Excel dialect. Ignored if sep longer than 1 char - See csv.Dialect documentation for more details + If provided, this parameter will override values (default or not) for the + following parameters: `delimiter`, `doublequote`, `escapechar`, + `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to + override values, a ParserWarning will be issued. See csv.Dialect + documentation for more details. 
tupleize_cols : boolean, default False Leave a list of tuples on columns as is (default is to convert to a Multi Index on the columns) @@ -698,12 +701,33 @@ def __init__(self, f, engine=None, **kwds): dialect = kwds['dialect'] if dialect in csv.list_dialects(): dialect = csv.get_dialect(dialect) - kwds['delimiter'] = dialect.delimiter - kwds['doublequote'] = dialect.doublequote - kwds['escapechar'] = dialect.escapechar - kwds['skipinitialspace'] = dialect.skipinitialspace - kwds['quotechar'] = dialect.quotechar - kwds['quoting'] = dialect.quoting + + # Any valid dialect should have these attributes. + # If any are missing, we will raise automatically. + for param in ('delimiter', 'doublequote', 'escapechar', + 'skipinitialspace', 'quotechar', 'quoting'): + try: + dialect_val = getattr(dialect, param) + except AttributeError: + raise ValueError("Invalid dialect '{dialect}' provided" + .format(dialect=kwds['dialect'])) + provided = kwds.get(param, _parser_defaults[param]) + + # Messages for conflicting values between the dialect instance + # and the actual parameters provided. + conflict_msgs = [] + + if dialect_val != provided: + conflict_msgs.append(( + "Conflicting values for '{param}': '{val}' was " + "provided, but the dialect specifies '{diaval}'. 
" + "Using the dialect-specified value.".format( + param=param, val=provided, diaval=dialect_val))) + + if conflict_msgs: + warnings.warn('\n\n'.join(conflict_msgs), ParserWarning, + stacklevel=2) + kwds[param] = dialect_val if kwds.get('header', 'infer') == 'infer': kwds['header'] = 0 if kwds.get('names') is None else None diff --git a/pandas/io/tests/parser/common.py b/pandas/io/tests/parser/common.py index c6c2a9e954f55..e694e529212aa 100644 --- a/pandas/io/tests/parser/common.py +++ b/pandas/io/tests/parser/common.py @@ -77,41 +77,6 @@ def test_read_csv(self): fname = prefix + compat.text_type(self.csv1) self.read_csv(fname, index_col=0, parse_dates=True) - def test_dialect(self): - data = """\ -label1,label2,label3 -index1,"a,c,e -index2,b,d,f -""" - - dia = csv.excel() - dia.quoting = csv.QUOTE_NONE - df = self.read_csv(StringIO(data), dialect=dia) - - data = '''\ -label1,label2,label3 -index1,a,c,e -index2,b,d,f -''' - exp = self.read_csv(StringIO(data)) - exp.replace('a', '"a', inplace=True) - tm.assert_frame_equal(df, exp) - - def test_dialect_str(self): - data = """\ -fruit:vegetable -apple:brocolli -pear:tomato -""" - exp = DataFrame({ - 'fruit': ['apple', 'pear'], - 'vegetable': ['brocolli', 'tomato'] - }) - dia = csv.register_dialect('mydialect', delimiter=':') # noqa - df = self.read_csv(StringIO(data), dialect='mydialect') - tm.assert_frame_equal(df, exp) - csv.unregister_dialect('mydialect') - def test_1000_sep(self): data = """A|B|C 1|2,334|5 diff --git a/pandas/io/tests/parser/dialect.py b/pandas/io/tests/parser/dialect.py new file mode 100644 index 0000000000000..ee50cf812f72e --- /dev/null +++ b/pandas/io/tests/parser/dialect.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- + +""" +Tests that dialects are properly handled during parsing +for all of the parsers defined in parsers.py +""" + +import csv + +from pandas import DataFrame +from pandas.compat import StringIO +from pandas.io.common import ParserWarning + +import pandas.util.testing as tm + + 
+class DialectTests(object): + + def test_dialect(self): + data = """\ +label1,label2,label3 +index1,"a,c,e +index2,b,d,f +""" + + dia = csv.excel() + dia.quoting = csv.QUOTE_NONE + with tm.assert_produces_warning(ParserWarning): + df = self.read_csv(StringIO(data), dialect=dia) + + data = '''\ +label1,label2,label3 +index1,a,c,e +index2,b,d,f +''' + exp = self.read_csv(StringIO(data)) + exp.replace('a', '"a', inplace=True) + tm.assert_frame_equal(df, exp) + + def test_dialect_str(self): + data = """\ +fruit:vegetable +apple:brocolli +pear:tomato +""" + exp = DataFrame({ + 'fruit': ['apple', 'pear'], + 'vegetable': ['brocolli', 'tomato'] + }) + csv.register_dialect('mydialect', delimiter=':') + with tm.assert_produces_warning(ParserWarning): + df = self.read_csv(StringIO(data), dialect='mydialect') + + tm.assert_frame_equal(df, exp) + csv.unregister_dialect('mydialect') + + def test_invalid_dialect(self): + class InvalidDialect(object): + pass + + data = 'a\n1' + msg = 'Invalid dialect' + + with tm.assertRaisesRegexp(ValueError, msg): + self.read_csv(StringIO(data), dialect=InvalidDialect) + + def test_dialect_conflict(self): + data = 'a,b\n1,2' + dialect = 'excel' + exp = DataFrame({'a': [1], 'b': [2]}) + + with tm.assert_produces_warning(None): + df = self.read_csv(StringIO(data), delimiter=',', dialect=dialect) + tm.assert_frame_equal(df, exp) + + with tm.assert_produces_warning(ParserWarning): + df = self.read_csv(StringIO(data), delimiter='.', dialect=dialect) + tm.assert_frame_equal(df, exp) diff --git a/pandas/io/tests/parser/test_parsers.py b/pandas/io/tests/parser/test_parsers.py index 6cca2e35e1135..a90f546d37fc8 100644 --- a/pandas/io/tests/parser/test_parsers.py +++ b/pandas/io/tests/parser/test_parsers.py @@ -11,6 +11,7 @@ from .common import ParserTests from .header import HeaderTests from .comment import CommentTests +from .dialect import DialectTests from .quoting import QuotingTests from .usecols import UsecolsTests from .skiprows import 
SkipRowsTests @@ -26,12 +27,12 @@ class BaseParser(CommentTests, CompressionTests, - ConverterTests, HeaderTests, - IndexColTests, MultithreadTests, - NAvaluesTests, ParseDatesTests, - ParserTests, SkipRowsTests, - UsecolsTests, QuotingTests, - DtypeTests): + ConverterTests, DialectTests, + HeaderTests, IndexColTests, + MultithreadTests, NAvaluesTests, + ParseDatesTests, ParserTests, + SkipRowsTests, UsecolsTests, + QuotingTests, DtypeTests): def read_csv(self, *args, **kwargs): raise NotImplementedError
1) Update documentation about how the `dialect` parameter is handled. 2) Verify that the `dialect` parameter passed in is valid before accessing the dialect attributes. Closes #14898.
https://api.github.com/repos/pandas-dev/pandas/pulls/14911
2016-12-18T20:35:30Z
2016-12-31T15:55:21Z
null
2017-01-19T20:58:17Z
BUG: select_dtypes now allows 'datetimetz' for generically selecting datetimes with timezones
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 76ba4a5f723fa..3eb8acdd300b5 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -106,6 +106,7 @@ Other enhancements - ``pd.Series.interpolate`` now supports timedelta as an index type with ``method='time'`` (:issue:`6424`) - ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`) +- ``.select_dtypes()`` now allows `datetimetz` to generically select datetimes with tz (:issue:`14910`) .. _whatsnew_0200.api_breaking: @@ -249,5 +250,4 @@ Bug Fixes - - Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 78d0f47d473c8..7305df0f57736 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2257,7 +2257,12 @@ def select_dtypes(self, include=None, exclude=None): this will return *all* object dtype columns * See the `numpy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ + * To select datetimes, use np.datetime64, 'datetime' or 'datetime64' + * To select timedeltas, use np.timedelta64, 'timedelta' or + 'timedelta64' * To select Pandas categorical dtypes, use 'category' + * To select Pandas datetimetz dtypes, use 'datetimetz' (new in 0.20.0), + or a 'datetime64[ns, tz]' string Examples -------- diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 61030c262a44b..43a108e9acc80 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -109,15 +109,48 @@ def test_select_dtypes_include(self): 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0, dtype='float64'), 'e': [True, False, True], - 'f': pd.Categorical(list('abc'))}) + 'f': pd.Categorical(list('abc')), + 'g': pd.date_range('20130101', periods=3), + 'h': 
pd.date_range('20130101', periods=3, + tz='US/Eastern'), + 'i': pd.date_range('20130101', periods=3, + tz='CET'), + 'j': pd.period_range('2013-01', periods=3, + freq='M'), + 'k': pd.timedelta_range('1 day', periods=3)}) + ri = df.select_dtypes(include=[np.number]) + ei = df[['b', 'c', 'd', 'k']] + assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=[np.number], exclude=['timedelta']) ei = df[['b', 'c', 'd']] assert_frame_equal(ri, ei) - ri = df.select_dtypes(include=[np.number, 'category']) + ri = df.select_dtypes(include=[np.number, 'category'], + exclude=['timedelta']) ei = df[['b', 'c', 'd', 'f']] assert_frame_equal(ri, ei) + ri = df.select_dtypes(include=['datetime']) + ei = df[['g']] + assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=['datetime64']) + ei = df[['g']] + assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=['datetimetz']) + ei = df[['h', 'i']] + assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=['timedelta']) + ei = df[['k']] + assert_frame_equal(ri, ei) + + self.assertRaises(NotImplementedError, + lambda: df.select_dtypes(include=['period'])) + def test_select_dtypes_exclude(self): df = DataFrame({'a': list('abc'), 'b': list(range(1, 4)), diff --git a/pandas/types/common.py b/pandas/types/common.py index b9d4c112c00d6..a7ba96f95e31b 100644 --- a/pandas/types/common.py +++ b/pandas/types/common.py @@ -400,6 +400,11 @@ def _get_dtype_from_object(dtype): pass return dtype.type elif isinstance(dtype, string_types): + if dtype in ['datetimetz', 'datetime64tz']: + return DatetimeTZDtype.type + elif dtype in ['period']: + raise NotImplementedError + if dtype == 'datetime' or dtype == 'timedelta': dtype += '64'
was missing a generic way to use select_dtypes to select *any* datetime with tz.
https://api.github.com/repos/pandas-dev/pandas/pulls/14910
2016-12-18T20:19:13Z
2016-12-19T11:02:27Z
2016-12-19T11:02:27Z
2016-12-19T11:02:27Z
CLN: remove simple _DATELIKE_DTYPES test and replace with is_datetimelike accessor
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index b249cded39133..950ad53abe5e0 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -14,10 +14,10 @@ from pandas.compat.numpy import function as nv from pandas.compat.numpy import _np_version_under1p8 -from pandas.types.common import (_DATELIKE_DTYPES, - is_numeric_dtype, +from pandas.types.common import (is_numeric_dtype, is_timedelta64_dtype, is_datetime64_dtype, is_categorical_dtype, + is_datetimelike, is_datetime_or_timedelta_dtype, is_bool, is_integer_dtype, is_complex_dtype, @@ -3453,10 +3453,10 @@ def first_non_None_value(values): # if we have date/time like in the original, then coerce dates # as we are stacking can easily have object dtypes here so = self._selected_obj - if (so.ndim == 2 and so.dtypes.isin(_DATELIKE_DTYPES).any()): + if (so.ndim == 2 and so.dtypes.apply(is_datetimelike).any()): result = result._convert(numeric=True) date_cols = self._selected_obj.select_dtypes( - include=list(_DATELIKE_DTYPES)).columns + include=['datetime', 'timedelta']).columns date_cols = date_cols.intersection(result.columns) result[date_cols] = (result[date_cols] ._convert(datetime=True, diff --git a/pandas/types/cast.py b/pandas/types/cast.py index d4beab5655e5c..4f4f95d5a455b 100644 --- a/pandas/types/cast.py +++ b/pandas/types/cast.py @@ -7,6 +7,7 @@ from pandas.compat import string_types, text_type, PY3 from .common import (_ensure_object, is_bool, is_integer, is_float, is_complex, is_datetimetz, is_categorical_dtype, + is_datetimelike, is_extension_type, is_object_dtype, is_datetime64tz_dtype, is_datetime64_dtype, is_timedelta64_dtype, is_dtype_equal, @@ -18,7 +19,7 @@ _ensure_int8, _ensure_int16, _ensure_int32, _ensure_int64, _NS_DTYPE, _TD_DTYPE, _INT64_DTYPE, - _DATELIKE_DTYPES, _POSSIBLY_CAST_DTYPES) + _POSSIBLY_CAST_DTYPES) from .dtypes import ExtensionDtype from .generic import ABCDatetimeIndex, ABCPeriodIndex, ABCSeries from .missing import isnull, notnull @@ -164,7 +165,7 @@ 
def _maybe_upcast_putmask(result, mask, other): # in np.place: # NaN -> NaT # integer or integer array -> date-like array - if result.dtype in _DATELIKE_DTYPES: + if is_datetimelike(result.dtype): if is_scalar(other): if isnull(other): other = result.dtype.type('nat') @@ -666,7 +667,7 @@ def _possibly_castable(arr): # otherwise try to coerce kind = arr.dtype.kind if kind == 'M' or kind == 'm': - return arr.dtype in _DATELIKE_DTYPES + return is_datetime64_dtype(arr.dtype) return arr.dtype.name not in _POSSIBLY_CAST_DTYPES diff --git a/pandas/types/common.py b/pandas/types/common.py index 5d161efa838de..b9d4c112c00d6 100644 --- a/pandas/types/common.py +++ b/pandas/types/common.py @@ -23,10 +23,6 @@ _TD_DTYPE = np.dtype('m8[ns]') _INT64_DTYPE = np.dtype(np.int64) -_DATELIKE_DTYPES = set([np.dtype(t) - for t in ['M8[ns]', '<M8[ns]', '>M8[ns]', - 'm8[ns]', '<m8[ns]', '>m8[ns]']]) - _ensure_float64 = algos.ensure_float64 _ensure_float32 = algos.ensure_float32 @@ -127,7 +123,8 @@ def is_datetime_arraylike(arr): def is_datetimelike(arr): - return (arr.dtype in _DATELIKE_DTYPES or + return (is_datetime64_dtype(arr) or is_datetime64tz_dtype(arr) or + is_timedelta64_dtype(arr) or isinstance(arr, ABCPeriodIndex) or is_datetimetz(arr)) diff --git a/pandas/types/missing.py b/pandas/types/missing.py index a4af127e0c381..e6791b79bf3bd 100644 --- a/pandas/types/missing.py +++ b/pandas/types/missing.py @@ -19,8 +19,7 @@ is_object_dtype, is_integer, _TD_DTYPE, - _NS_DTYPE, - _DATELIKE_DTYPES) + _NS_DTYPE) from .inference import is_list_like @@ -169,7 +168,7 @@ def _isnull_ndarraylike_old(obj): vec = lib.isnullobj_old(values.ravel()) result[:] = vec.reshape(shape) - elif dtype in _DATELIKE_DTYPES: + elif is_datetime64_dtype(dtype): # this is the NaT pattern result = values.view('i8') == iNaT else:
this was some older code
https://api.github.com/repos/pandas-dev/pandas/pulls/14909
2016-12-18T20:17:55Z
2016-12-18T20:29:36Z
null
2016-12-18T20:29:36Z
TST: Groupby/transform with grouped NaN (#9941)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 97e1f7dc94866..f8d9d73590a60 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1374,6 +1374,15 @@ def test_groupby_transform_with_int(self): expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1])) assert_frame_equal(result, expected) + def test_groupby_transform_with_nan_group(self): + # GH 9941 + df = pd.DataFrame({'a': range(10), + 'b': [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]}) + result = df.groupby(df.b)['a'].transform(max) + expected = pd.Series([1., 1., 2., 3., np.nan, 6., 6., 9., 9., 9.], + name='a') + assert_series_equal(result, expected) + def test_indices_concatenation_order(self): # GH 2808
- [x] closes #9941 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` The original issue works on 0.19.1 as expected (resulting value is `nan`). Doesn't look like this was fixed in a PR for 0.19.2 or 0.20
https://api.github.com/repos/pandas-dev/pandas/pulls/14907
2016-12-18T07:12:07Z
2016-12-18T20:24:12Z
2016-12-18T20:24:12Z
2016-12-21T04:21:29Z
TST: Test timedelta arithmetic (#9396)
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index fc95b17b9b52d..1d07b4ab39a99 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -1203,6 +1203,28 @@ def test_implementation_limits(self): with tm.assertRaises(OverflowError): Timedelta(max_td.value + 1, 'ns') + def test_timedelta_arithmetic(self): + data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]') + deltas = [timedelta(days=1), Timedelta(1, unit='D')] + for delta in deltas: + result_method = data.add(delta) + result_operator = data + delta + expected = pd.Series(['nat', '33 days'], dtype='timedelta64[ns]') + tm.assert_series_equal(result_operator, expected) + tm.assert_series_equal(result_method, expected) + + result_method = data.sub(delta) + result_operator = data - delta + expected = pd.Series(['nat', '31 days'], dtype='timedelta64[ns]') + tm.assert_series_equal(result_operator, expected) + tm.assert_series_equal(result_method, expected) + # GH 9396 + result_method = data.div(delta) + result_operator = data / delta + expected = pd.Series([np.nan, 32.], dtype='float64') + tm.assert_series_equal(result_operator, expected) + tm.assert_series_equal(result_method, expected) + class TestTimedeltaIndex(tm.TestCase): _multiprocess_can_split_ = True
- [x] closes #9396 - [x] tests added / passed - [x] passes ``git diff upstream/master | flake8 --diff`` Original issue was with using `div` with `datetime.timedelta`. Doesn't appear this was fixed recently.
https://api.github.com/repos/pandas-dev/pandas/pulls/14906
2016-12-18T05:12:17Z
2016-12-18T20:22:25Z
2016-12-18T20:22:25Z
2016-12-21T04:20:39Z
PR for Pandas issue #14872 / fillna() TZ datetime64 rounded
diff --git a/doc/source/whatsnew/v0.19.2.txt b/doc/source/whatsnew/v0.19.2.txt index 4cd58f0148ae8..c9056054fb1d7 100644 --- a/doc/source/whatsnew/v0.19.2.txt +++ b/doc/source/whatsnew/v0.19.2.txt @@ -38,6 +38,7 @@ Other Enhancements Bug Fixes ~~~~~~~~~ +- Bug in fillna() in which timezone aware datetime64 values were incorrectly rounded (:issue:'14872') - Compat with ``dateutil==2.6.0``; segfault reported in the testing suite (:issue:`14621`) - Allow ``nanoseconds`` in ``Timestamp.replace`` as a kwarg (:issue:`14621`) - Bug in ``pd.read_csv`` in which aliasing was being done for ``na_values`` when passed in as a dictionary (:issue:`14203`) diff --git a/pandas/core/missing.py b/pandas/core/missing.py index f1191ff1c7009..e83a0518d97f6 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -10,9 +10,8 @@ from pandas.compat import range, string_types from pandas.types.common import (is_numeric_v_string_like, is_float_dtype, is_datetime64_dtype, - is_integer_dtype, _ensure_float64, - is_scalar, - _DATELIKE_DTYPES, + is_datetime64tz_dtype, is_integer_dtype, + _ensure_float64, is_scalar, needs_i8_conversion) from pandas.types.missing import isnull @@ -450,7 +449,7 @@ def pad_1d(values, limit=None, mask=None, dtype=None): _method = None if is_float_dtype(values): _method = getattr(algos, 'pad_inplace_%s' % dtype.name, None) - elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values): + elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _pad_1d_datetime elif is_integer_dtype(values): values = _ensure_float64(values) @@ -475,7 +474,7 @@ def backfill_1d(values, limit=None, mask=None, dtype=None): _method = None if is_float_dtype(values): _method = getattr(algos, 'backfill_inplace_%s' % dtype.name, None) - elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values): + elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _backfill_1d_datetime elif is_integer_dtype(values): values = _ensure_float64(values) @@ 
-501,7 +500,7 @@ def pad_2d(values, limit=None, mask=None, dtype=None): _method = None if is_float_dtype(values): _method = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None) - elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values): + elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _pad_2d_datetime elif is_integer_dtype(values): values = _ensure_float64(values) @@ -531,7 +530,7 @@ def backfill_2d(values, limit=None, mask=None, dtype=None): _method = None if is_float_dtype(values): _method = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None) - elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values): + elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _backfill_2d_datetime elif is_integer_dtype(values): values = _ensure_float64(values) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 5666a07cad4b8..58f50ffbf08f5 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -17,6 +17,9 @@ from .common import TestData +import datetime +import pytz + def _skip_if_no_pchip(): try: @@ -908,6 +911,24 @@ def test_interp_timedelta64(self): index=pd.to_timedelta([1, 2, 4])) assert_series_equal(result, expected) + # GH 14872 + def test_dtype_utc(self): + + data = pd.Series([pd.NaT, pd.NaT, + datetime.datetime(2016, 12, 12, 22, 24, 6, 100001, + tzinfo=pytz.utc)]) + + filled = data.fillna(method='bfill') + + expected = pd.Series([ + datetime.datetime(2016, 12, 12, 22, 24, 6, + 100001, tzinfo=pytz.utc), + datetime.datetime(2016, 12, 12, 22, 24, 6, + 100001, tzinfo=pytz.utc), + datetime.datetime(2016, 12, 12, 22, 24, 6, + 100001, tzinfo=pytz.utc)]) + + assert_series_equal(filled, expected) if __name__ == '__main__': import nose diff --git a/pandas/types/common.py b/pandas/types/common.py index 754ff80924c07..5d161efa838de 100644 --- a/pandas/types/common.py +++ b/pandas/types/common.py @@ -22,6 +22,7 @@ _NS_DTYPE = 
np.dtype('M8[ns]') _TD_DTYPE = np.dtype('m8[ns]') _INT64_DTYPE = np.dtype(np.int64) + _DATELIKE_DTYPES = set([np.dtype(t) for t in ['M8[ns]', '<M8[ns]', '>M8[ns]', 'm8[ns]', '<m8[ns]', '>m8[ns]']])
Hi Jeff, The change adds a "datetime64[ns, UTC]" object to the _DATELIKE_DTYPES set in pandas/types/common.py. No such object existed in this set and that is the reason the wrong method was executed on the inputs. Object is created with "DatetimeTZDtype.__new__" Thanks, Rodolfo - [X] closes #14872 - [ X] tests added / passed - [ X] passes ``git diff upstream/master | flake8 --diff`` (returns the same number of skips, errors and failures before changes) - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/14905
2016-12-17T20:43:16Z
2016-12-18T19:42:28Z
null
2016-12-18T19:42:38Z
ENH: Added to_json_schema
diff --git a/ci/requirements-2.7.pip b/ci/requirements-2.7.pip index 08240184f2934..eb796368e7820 100644 --- a/ci/requirements-2.7.pip +++ b/ci/requirements-2.7.pip @@ -4,3 +4,5 @@ pathlib backports.lzma py PyCrypto +mock +ipython diff --git a/ci/requirements-3.5.run b/ci/requirements-3.5.run index b07ce611c79a2..43e6814ed6c8e 100644 --- a/ci/requirements-3.5.run +++ b/ci/requirements-3.5.run @@ -18,3 +18,4 @@ pymysql psycopg2 s3fs beautifulsoup4 +ipython diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run index 5d9cb05a7b402..9a6c1c7edbc5e 100644 --- a/ci/requirements-3.6.run +++ b/ci/requirements-3.6.run @@ -18,3 +18,4 @@ pymysql beautifulsoup4 s3fs xarray +ipython diff --git a/doc/source/api.rst b/doc/source/api.rst index 6c4a3cff5b4cf..33ac5fde651d4 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -60,6 +60,7 @@ JSON :toctree: generated/ json_normalize + build_table_schema .. currentmodule:: pandas diff --git a/doc/source/io.rst b/doc/source/io.rst index b36ae8c2ed450..c34cc1ec17512 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2033,6 +2033,126 @@ using Hadoop or Spark. df df.to_json(orient='records', lines=True) + +.. _io.table_schema: + +Table Schema +'''''''''''' + +.. versionadded:: 0.20.0 + +`Table Schema`_ is a spec for describing tabular datasets as a JSON +object. The JSON includes information on the field names, types, and +other attributes. You can use the orient ``table`` to build +a JSON string with two fields, ``schema`` and ``data``. + +.. ipython:: python + + df = pd.DataFrame( + {'A': [1, 2, 3], + 'B': ['a', 'b', 'c'], + 'C': pd.date_range('2016-01-01', freq='d', periods=3), + }, index=pd.Index(range(3), name='idx')) + df + df.to_json(orient='table', date_format="iso") + +The ``schema`` field contains the ``fields`` key, which itself contains +a list of column name to type pairs, including the ``Index`` or ``MultiIndex`` +(see below for a list of types). 
+The ``schema`` field also contains a ``primaryKey`` field if the (Multi)index +is unique. + +The second field, ``data``, contains the serialized data with the ``records`` +orient. +The index is included, and any datetimes are ISO 8601 formatted, as required +by the Table Schema spec. + +The full list of types supported are described in the Table Schema +spec. This table shows the mapping from pandas types: + +============== ================= +Pandas type Table Schema type +============== ================= +int64 integer +float64 number +bool boolean +datetime64[ns] datetime +timedelta64[ns] duration +categorical any +object str +=============== ================= + +A few notes on the generated table schema: + +- The ``schema`` object contains a ``pandas_version`` field. This contains + the version of pandas' dialect of the schema, and will be incremented + with each revision. +- All dates are converted to UTC when serializing. Even timezone naïve values, + which are treated as UTC with an offset of 0. + + .. ipython:: python: + + from pandas.io.json import build_table_schema + s = pd.Series(pd.date_range('2016', periods=4)) + build_table_schema(s) + +- datetimes with a timezone (before serializing), include an additional field + ``tz`` with the time zone name (e.g. ``'US/Central'``). + + .. ipython:: python + + s_tz = pd.Series(pd.date_range('2016', periods=12, + tz='US/Central')) + build_table_schema(s_tz) + +- Periods are converted to timestamps before serialization, and so have the + same behavior of being converted to UTC. In addition, periods will contain + and additional field ``freq`` with the period's frequency, e.g. ``'A-DEC'`` + + .. ipython:: python + + s_per = pd.Series(1, index=pd.period_range('2016', freq='A-DEC', + periods=4)) + build_table_schema(s_per) + +- Categoricals use the ``any`` type and an ``enum`` constraint listing + the set of possible values. Additionally, an ``ordered`` field is included + + .. 
ipython:: python + + s_cat = pd.Series(pd.Categorical(['a', 'b', 'a'])) + build_table_schema(s_cat) + +- A ``primaryKey`` field, containing an array of labels, is included + *if the index is unique*: + + .. ipython:: python + + s_dupe = pd.Series([1, 2], index=[1, 1]) + build_table_schema(s_dupe) + +- The ``primaryKey`` behavior is the same with MultiIndexes, but in this + case the ``primaryKey`` is an array: + + .. ipython:: python + + s_multi = pd.Series(1, index=pd.MultiIndex.from_product([('a', 'b'), + (0, 1)])) + build_table_schema(s_multi) + +- The default naming roughly follows these rules: + + + For series, the ``object.name`` is used. If that's None, then the + name is ``values`` + + For DataFrames, the stringified version of the column name is used + + For ``Index`` (not ``MultiIndex``), ``index.name`` is used, with a + fallback to ``index`` if that is None. + + For ``MultiIndex``, ``mi.names`` is used. If any level has no name, + then ``level_<i>`` is used. + + +.. _Table Schema: http://specs.frictionlessdata.io/json-table-schema/ + HTML ---- diff --git a/doc/source/options.rst b/doc/source/options.rst index 10a13ed36df8d..1a0e5cf6b7235 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -397,6 +397,9 @@ display.width 80 Width of the display in charact IPython qtconsole, or IDLE do not run in a terminal and hence it is not possible to correctly detect the width. +display.html.table_schema False Whether to publish a Table Schema + representation for frontends that + support it. html.border 1 A ``border=value`` attribute is inserted in the ``<table>`` tag for the DataFrame HTML repr. @@ -424,6 +427,7 @@ mode.use_inf_as_null False True means treat None, NaN, -IN are not null (new way). =================================== ============ ================================== + ..
_basics.console_output: Number Formatting @@ -512,3 +516,20 @@ Enabling ``display.unicode.ambiguous_as_wide`` lets pandas to figure these chara pd.set_option('display.unicode.east_asian_width', False) pd.set_option('display.unicode.ambiguous_as_wide', False) + +.. _options.table_schema: + +Table Schema Display +-------------------- + +.. versionadded:: 0.20.0 + +``DataFrame`` and ``Series`` will publish a Table Schema representation +by default. False by default, this can be enabled globally with the +``display.html.table_schema`` option: + +.. ipython:: python + + pd.set_option('display.html.table_schema', True) + +Only ``'display.max_rows'`` are serialized and published. diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 8b6c53a159ad8..7b4538bd181d2 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -12,6 +12,7 @@ Highlights include: - Building pandas for development now requires ``cython >= 0.23`` (:issue:`14831`) - The ``.ix`` indexer has been deprecated, see :ref:`here <whatsnew_0200.api_breaking.deprecate_ix>` - Switched the test framework to `pytest`_ (:issue:`13097`) +- A new orient for JSON serialization, ``orient='table'``, that uses the Table Schema spec, see :ref: `here <whatsnew_0200.enhancements.table_schema>` .. _pytest: http://doc.pytest.org/en/latest/ @@ -154,6 +155,40 @@ New Behavior: df[df.chromosomes != '1'].groupby('chromosomes', sort=False).sum() +.. _whatsnew_0200.enhancements.table_schema + +Table Schema Output +^^^^^^^^^^^^^^^^^^^ + +The new orient ``'table'`` for :meth:`DataFrame.to_json` +will generate a `Table Schema`_ compatible string representation of +the data. + +.. ipython:: python + + df = pd.DataFrame( + {'A': [1, 2, 3], + 'B': ['a', 'b', 'c'], + 'C': pd.date_range('2016-01-01', freq='d', periods=3), + }, index=pd.Index(range(3), name='idx')) + df + df.to_json(orient='table') + + +See :ref:`IO: Table Schema for more<io.table_schema>`. 
+ +Additionally, the repr for ``DataFrame`` and ``Series`` can now publish +this JSON Table schema representation of the Series or DataFrame if you are +using IPython (or another frontend like `nteract`_ using the Jupyter messaging +protocol). +This gives frontends like the Jupyter notebook and `nteract`_ +more flexibility in how they display pandas objects, since they have +more information about the data. +You must enable this by setting the ``display.html.table_schema`` option to True. + +.. _Table Schema: http://specs.frictionlessdata.io/json-table-schema/ +.. _nteract: http://nteract.io/ + .. _whatsnew_0200.enhancements.other: Other enhancements diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 89616890e1de1..931fe0661818d 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -164,6 +164,13 @@ (default: False) """ +pc_table_schema_doc = """ +: boolean + Whether to publish a Table Schema representation for frontends + that support it. + (default: False) +""" + pc_line_width_deprecation_warning = """\ line_width has been deprecated, use display.width instead (currently both are identical) @@ -366,6 +373,9 @@ def mpl_style_cb(key): validator=is_text) cf.register_option('latex.multirow', False, pc_latex_multirow, validator=is_bool) + cf.register_option('html.table_schema', False, pc_table_schema_doc, + validator=is_bool) + cf.deprecate_option('display.line_width', msg=pc_line_width_deprecation_warning, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 127aac970fbc1..298fa75779420 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4,6 +4,7 @@ import operator import weakref import gc +import json import numpy as np import pandas.lib as lib @@ -129,6 +130,37 @@ def __init__(self, data, axes=None, copy=False, dtype=None, object.__setattr__(self, '_data', data) object.__setattr__(self, '_item_cache', {}) + def _ipython_display_(self): + try: + from IPython.display import display + except
ImportError: + return None + + # Series doesn't define _repr_html_ or _repr_latex_ + latex = self._repr_latex_() if hasattr(self, '_repr_latex_') else None + html = self._repr_html_() if hasattr(self, '_repr_html_') else None + table_schema = self._repr_table_schema_() + # We need the inital newline since we aren't going through the + # usual __repr__. See + # https://github.com/pandas-dev/pandas/pull/14904#issuecomment-277829277 + text = "\n" + repr(self) + + reprs = {"text/plain": text, "text/html": html, "text/latex": latex, + "application/vnd.dataresource+json": table_schema} + reprs = {k: v for k, v in reprs.items() if v} + display(reprs, raw=True) + + def _repr_table_schema_(self): + """ + Not a real Jupyter special repr method, but we use the same + naming convention. + """ + if config.get_option("display.html.table_schema"): + data = self.head(config.get_option('display.max_rows')) + payload = json.loads(data.to_json(orient='table'), + object_pairs_hook=collections.OrderedDict) + return payload + def _validate_dtype(self, dtype): """ validate the passed dtype """ @@ -1094,7 +1126,7 @@ def __setstate__(self, state): strings before writing. """ - def to_json(self, path_or_buf=None, orient=None, date_format='epoch', + def to_json(self, path_or_buf=None, orient=None, date_format=None, double_precision=10, force_ascii=True, date_unit='ms', default_handler=None, lines=False): """ @@ -1129,10 +1161,17 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch', - index : dict like {index -> {column -> value}} - columns : dict like {column -> {index -> value}} - values : just the values array + - table : dict like {'schema': {schema}, 'data': {data}} + describing the data, and the data component is + like ``orient='records'``. - date_format : {'epoch', 'iso'} + .. versionchanged:: 0.20.0 + + date_format : {None, 'epoch', 'iso'} Type of date conversion. `epoch` = epoch milliseconds, - `iso`` = ISO8601, default is epoch. + `iso` = ISO8601. 
The default depends on the `orient`. For + `orient='table'`, the default is `'iso'`. For all other orients, + the default is `'epoch'`. double_precision : The number of decimal places to use when encoding floating point values, default 10. force_ascii : force encoded string to be ASCII, default True. @@ -1151,14 +1190,53 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch', .. versionadded:: 0.19.0 - Returns ------- same type as input object with filtered info axis + See Also + -------- + pd.read_json + + Examples + -------- + + >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], + ... index=['row 1', 'row 2'], + ... columns=['col 1', 'col 2']) + >>> df.to_json(orient='split') + '{"columns":["col 1","col 2"], + "index":["row 1","row 2"], + "data":[["a","b"],["c","d"]]}' + + Encoding/decoding a Dataframe using ``'index'`` formatted JSON: + + >>> df.to_json(orient='index') + '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}' + + Encoding/decoding a Dataframe using ``'records'`` formatted JSON. + Note that index labels are not preserved with this encoding. 
+ + >>> df.to_json(orient='records') + '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]' + + Encoding with Table Schema + + >>> df.to_json(orient='table') + '{"schema": {"fields": [{"name": "index", "type": "string"}, + {"name": "col 1", "type": "string"}, + {"name": "col 2", "type": "string"}], + "primaryKey": "index", + "pandas_version": "0.20.0"}, + "data": [{"index": "row 1", "col 1": "a", "col 2": "b"}, + {"index": "row 2", "col 1": "c", "col 2": "d"}]}' """ from pandas.io import json + if date_format is None and orient == 'table': + date_format = 'iso' + elif date_format is None: + date_format = 'epoch' return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, diff --git a/pandas/io/json/__init__.py b/pandas/io/json/__init__.py index a9390a04cc2cd..32d110b3404a9 100644 --- a/pandas/io/json/__init__.py +++ b/pandas/io/json/__init__.py @@ -1,4 +1,5 @@ from .json import to_json, read_json, loads, dumps # noqa from .normalize import json_normalize # noqa +from .table_schema import build_table_schema # noqa -del json, normalize # noqa +del json, normalize, table_schema # noqa diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 6fc766081eefe..a00d3492e8a37 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -1,5 +1,4 @@ # pylint: disable-msg=E1101,W0613,W0603 - import os import numpy as np @@ -12,10 +11,14 @@ from pandas.core.common import AbstractMethodError from pandas.formats.printing import pprint_thing from .normalize import _convert_to_line_delimits +from .table_schema import build_table_schema +from pandas.types.common import is_period_dtype loads = _json.loads dumps = _json.dumps +TABLE_SCHEMA_VERSION = '0.20.0' + # interface to/from def to_json(path_or_buf, obj, orient=None, date_format='epoch', @@ -26,19 +29,22 @@ def to_json(path_or_buf, obj, orient=None, date_format='epoch', raise ValueError( "'lines' keyword only valid when 'orient' is records") 
- if isinstance(obj, Series): - s = SeriesWriter( - obj, orient=orient, date_format=date_format, - double_precision=double_precision, ensure_ascii=force_ascii, - date_unit=date_unit, default_handler=default_handler).write() + if orient == 'table' and isinstance(obj, Series): + obj = obj.to_frame(name=obj.name or 'values') + if orient == 'table' and isinstance(obj, DataFrame): + writer = JSONTableWriter + elif isinstance(obj, Series): + writer = SeriesWriter elif isinstance(obj, DataFrame): - s = FrameWriter( - obj, orient=orient, date_format=date_format, - double_precision=double_precision, ensure_ascii=force_ascii, - date_unit=date_unit, default_handler=default_handler).write() + writer = FrameWriter else: raise NotImplementedError("'obj' should be a Series or a DataFrame") + s = writer( + obj, orient=orient, date_format=date_format, + double_precision=double_precision, ensure_ascii=force_ascii, + date_unit=date_unit, default_handler=default_handler).write() + if lines: s = _convert_to_line_delimits(s) @@ -81,7 +87,8 @@ def write(self): ensure_ascii=self.ensure_ascii, date_unit=self.date_unit, iso_dates=self.date_format == 'iso', - default_handler=self.default_handler) + default_handler=self.default_handler + ) class SeriesWriter(Writer): @@ -108,6 +115,55 @@ def _format_axes(self): "'%s'." % self.orient) +class JSONTableWriter(FrameWriter): + _default_orient = 'records' + + def __init__(self, obj, orient, date_format, double_precision, + ensure_ascii, date_unit, default_handler=None): + """ + Adds a `schema` attribute with the Table Schema, resets + the index (can't do in caller, because the schema inference needs + to know what the index is), forces orient to records, and forces + date_format to 'iso'. + """ + super(JSONTableWriter, self).__init__( + obj, orient, date_format, double_precision, ensure_ascii, + date_unit, default_handler=default_handler) + + if date_format != 'iso': + msg = ("Trying to write with `orient='table'` and " + "`date_format='%s'`. 
Table Schema requires dates " + "to be formatted with `date_format='iso'`" % date_format) + raise ValueError(msg) + + self.schema = build_table_schema(obj) + + # TODO: Do this timedelta properly in objToJSON.c See GH #15137 + if ((obj.ndim == 1) and (obj.name in set(obj.index.names)) or + len(obj.columns & obj.index.names)): + msg = "Overlapping names between the index and columns" + raise ValueError(msg) + + obj = obj.copy() + timedeltas = obj.select_dtypes(include=['timedelta']).columns + if len(timedeltas): + obj[timedeltas] = obj[timedeltas].applymap( + lambda x: x.isoformat()) + # Convert PeriodIndex to datetimes before serialzing + if is_period_dtype(obj.index): + obj.index = obj.index.to_timestamp() + + self.obj = obj.reset_index() + self.date_format = 'iso' + self.orient = 'records' + + def write(self): + data = super(JSONTableWriter, self).write() + serialized = '{{"schema": {}, "data": {}}}'.format( + dumps(self.schema), data) + return serialized + + def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, convert_axes=True, convert_dates=True, keep_default_dates=True, numpy=False, precise_float=False, date_unit=None, encoding=None, @@ -244,6 +300,17 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, col 1 col 2 0 a b 1 c d + + Encoding with Table Schema + + >>> df.to_json(orient='table') + '{"schema": {"fields": [{"name": "index", "type": "string"}, + {"name": "col 1", "type": "string"}, + {"name": "col 2", "type": "string"}], + "primaryKey": "index", + "pandas_version": "0.20.0"}, + "data": [{"index": "row 1", "col 1": "a", "col 2": "b"}, + {"index": "row 2", "col 1": "c", "col 2": "d"}]}' """ filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf, diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py new file mode 100644 index 0000000000000..48f92d28baf61 --- /dev/null +++ b/pandas/io/json/table_schema.py @@ -0,0 +1,177 @@ +""" +Table Schema builders + 
+http://specs.frictionlessdata.io/json-table-schema/ +""" +from pandas.types.common import ( + is_integer_dtype, is_timedelta64_dtype, is_numeric_dtype, + is_bool_dtype, is_datetime64_dtype, is_datetime64tz_dtype, + is_categorical_dtype, is_period_dtype, is_string_dtype +) + + +def as_json_table_type(x): + """ + Convert a NumPy / pandas type to its corresponding json_table. + + Parameters + ---------- + x : array or dtype + + Returns + ------- + t : str + the Table Schema data types + + Notes + ----- + This table shows the relationship between NumPy / pandas dtypes, + and Table Schema dtypes. + + ============== ================= + Pandas type Table Schema type + ============== ================= + int64 integer + float64 number + bool boolean + datetime64[ns] datetime + timedelta64[ns] duration + object str + categorical any + =============== ================= + """ + if is_integer_dtype(x): + return 'integer' + elif is_bool_dtype(x): + return 'boolean' + elif is_numeric_dtype(x): + return 'number' + elif (is_datetime64_dtype(x) or is_datetime64tz_dtype(x) or + is_period_dtype(x)): + return 'datetime' + elif is_timedelta64_dtype(x): + return 'duration' + elif is_categorical_dtype(x): + return 'any' + elif is_string_dtype(x): + return 'string' + else: + return 'any' + + +def set_default_names(data): + """Sets index names to 'index' for regular, or 'level_x' for Multi""" + if all(name is not None for name in data.index.names): + return data + + data = data.copy() + if data.index.nlevels > 1: + names = [name if name is not None else 'level_{}'.format(i) + for i, name in enumerate(data.index.names)] + data.index.names = names + else: + data.index.name = data.index.name or 'index' + return data + + +def make_field(arr, dtype=None): + dtype = dtype or arr.dtype + field = {'name': arr.name or 'values', + 'type': as_json_table_type(dtype)} + + if is_categorical_dtype(arr): + if hasattr(arr, 'categories'): + cats = arr.categories + ordered = arr.ordered + else: + cats = 
arr.cat.categories + ordered = arr.cat.ordered + field['constraints'] = {"enum": list(cats)} + field['ordered'] = ordered + elif is_period_dtype(arr): + field['freq'] = arr.freqstr + elif is_datetime64tz_dtype(arr): + if hasattr(arr, 'dt'): + field['tz'] = arr.dt.tz.zone + else: + field['tz'] = arr.tz.zone + return field + + +def build_table_schema(data, index=True, primary_key=None, version=True): + """ + Create a Table schema from ``data``. + + Parameters + ---------- + data : Series, DataFrame + index : bool, default True + Whether to include ``data.index`` in the schema. + primary_key : bool or None, default None + column names to designate as the primary key. + The default `None` will set `'primaryKey'` to the index + level or levels if the index is unique. + version : bool, default True + Whether to include a field `pandas_version` with the version + of pandas that generated the schema. + + Returns + ------- + schema : dict + + Examples + -------- + >>> df = pd.DataFrame( + ... {'A': [1, 2, 3], + ... 'B': ['a', 'b', 'c'], + ... 'C': pd.date_range('2016-01-01', freq='d', periods=3), + ... }, index=pd.Index(range(3), name='idx')) + >>> build_table_schema(df) + {'fields': [{'name': 'idx', 'type': 'integer'}, + {'name': 'A', 'type': 'integer'}, + {'name': 'B', 'type': 'string'}, + {'name': 'C', 'type': 'datetime'}], + 'pandas_version': '0.20.0', + 'primaryKey': ['idx']} + + Notes + ----- + See `as_json_table_type` for conversion types. + Timedeltas are converted to ISO8601 duration format with + 9 decimal places after the seconds field for nanosecond precision. + + Categoricals are converted to the `any` dtype, and use the `enum` field + constraint to list the allowed values. The `ordered` attribute is included + in an `ordered` field. 
+ """ + if index is True: + data = set_default_names(data) + + schema = {} + fields = [] + + if index: + if data.index.nlevels > 1: + for level in data.index.levels: + fields.append(make_field(level)) + else: + fields.append(make_field(data.index)) + + if data.ndim > 1: + for column, s in data.iteritems(): + fields.append(make_field(s)) + else: + fields.append(make_field(data)) + + schema['fields'] = fields + if index and data.index.is_unique and primary_key is None: + if data.index.nlevels == 1: + schema['primaryKey'] = [data.index.name] + else: + schema['primaryKey'] = data.index.names + elif primary_key is not None: + schema['primaryKey'] = primary_key + + if version: + schema['pandas_version'] = '0.20.0' + return schema diff --git a/pandas/tests/formats/test_printing.py b/pandas/tests/formats/test_printing.py index 52f3e06c6cbd0..cacba2ad3f3ba 100644 --- a/pandas/tests/formats/test_printing.py +++ b/pandas/tests/formats/test_printing.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- +import pytest from pandas import compat +import pandas as pd import pandas.formats.printing as printing import pandas.formats.format as fmt import pandas.util.testing as tm @@ -118,6 +120,65 @@ def test_ambiguous_width(self): self.assertEqual(adjoined, expected) +class TestTableSchemaRepr(tm.TestCase): + + @classmethod + def setUpClass(cls): + pytest.importorskip('IPython') + try: + import mock + except ImportError: + try: + from unittest import mock + except ImportError: + pytest.skip("Mock is not installed") + cls.mock = mock + + def test_publishes(self): + df = pd.DataFrame({"A": [1, 2]}) + objects = [df['A'], df, df] # dataframe / series + expected_keys = [ + {'text/plain', 'application/vnd.dataresource+json'}, + {'text/plain', 'text/html', 'application/vnd.dataresource+json'}, + ] + + make_patch = self.mock.patch('IPython.display.display') + opt = pd.option_context('display.html.table_schema', True) + for obj, expected in zip(objects, expected_keys): + with opt, make_patch as 
mock_display: + handle = obj._ipython_display_() + self.assertEqual(mock_display.call_count, 1) + self.assertIsNone(handle) + args, kwargs = mock_display.call_args + arg, = args # just one argument + + self.assertEqual(kwargs, {"raw": True}) + self.assertEqual(set(arg.keys()), expected) + + with_latex = pd.option_context('display.latex.repr', True) + + with opt, with_latex, make_patch as mock_display: + handle = obj._ipython_display_() + args, kwargs = mock_display.call_args + arg, = args + + expected = {'text/plain', 'text/html', 'text/latex', + 'application/vnd.dataresource+json'} + self.assertEqual(set(arg.keys()), expected) + + def test_config_on(self): + df = pd.DataFrame({"A": [1, 2]}) + with pd.option_context("display.html.table_schema", True): + result = df._repr_table_schema_() + self.assertIsNotNone(result) + + def test_config_default_off(self): + df = pd.DataFrame({"A": [1, 2]}) + with pd.option_context("display.html.table_schema", False): + result = df._repr_table_schema_() + self.assertIsNone(result) + + # TODO: fix this broken test # def test_console_encode(): diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py new file mode 100644 index 0000000000000..d1795f2816817 --- /dev/null +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -0,0 +1,462 @@ +"""Tests for Table Schema integration.""" +import json +from collections import OrderedDict + +import numpy as np +import pandas as pd +import pytest + +from pandas import DataFrame +from pandas.types.dtypes import PeriodDtype, CategoricalDtype, DatetimeTZDtype +import pandas.util.testing as tm +from pandas.io.json.table_schema import ( + as_json_table_type, build_table_schema, make_field, set_default_names +) + + +class TestBuildSchema(tm.TestCase): + + def setUp(self): + self.df = DataFrame( + {'A': [1, 2, 3, 4], + 'B': ['a', 'b', 'c', 'c'], + 'C': pd.date_range('2016-01-01', freq='d', periods=4), + 'D': pd.timedelta_range('1H', periods=4, 
freq='T'), + }, + index=pd.Index(range(4), name='idx')) + + def test_build_table_schema(self): + result = build_table_schema(self.df, version=False) + expected = { + 'fields': [{'name': 'idx', 'type': 'integer'}, + {'name': 'A', 'type': 'integer'}, + {'name': 'B', 'type': 'string'}, + {'name': 'C', 'type': 'datetime'}, + {'name': 'D', 'type': 'duration'}, + ], + 'primaryKey': ['idx'] + } + self.assertEqual(result, expected) + result = build_table_schema(self.df) + self.assertTrue("pandas_version" in result) + + def test_series(self): + s = pd.Series([1, 2, 3], name='foo') + result = build_table_schema(s, version=False) + expected = {'fields': [{'name': 'index', 'type': 'integer'}, + {'name': 'foo', 'type': 'integer'}], + 'primaryKey': ['index']} + self.assertEqual(result, expected) + result = build_table_schema(s) + self.assertTrue('pandas_version' in result) + + def test_series_unnamed(self): + result = build_table_schema(pd.Series([1, 2, 3]), version=False) + expected = {'fields': [{'name': 'index', 'type': 'integer'}, + {'name': 'values', 'type': 'integer'}], + 'primaryKey': ['index']} + self.assertEqual(result, expected) + + def test_multiindex(self): + df = self.df.copy() + idx = pd.MultiIndex.from_product([('a', 'b'), (1, 2)]) + df.index = idx + + result = build_table_schema(df, version=False) + expected = { + 'fields': [{'name': 'level_0', 'type': 'string'}, + {'name': 'level_1', 'type': 'integer'}, + {'name': 'A', 'type': 'integer'}, + {'name': 'B', 'type': 'string'}, + {'name': 'C', 'type': 'datetime'}, + {'name': 'D', 'type': 'duration'}, + ], + 'primaryKey': ['level_0', 'level_1'] + } + self.assertEqual(result, expected) + + df.index.names = ['idx0', None] + expected['fields'][0]['name'] = 'idx0' + expected['primaryKey'] = ['idx0', 'level_1'] + result = build_table_schema(df, version=False) + self.assertEqual(result, expected) + + +class TestTableSchemaType(tm.TestCase): + + def test_as_json_table_type_int_data(self): + int_data = [1, 2, 3] + int_types = 
[np.int, np.int16, np.int32, np.int64] + for t in int_types: + self.assertEqual(as_json_table_type(np.array(int_data, dtype=t)), + 'integer') + + def test_as_json_table_type_float_data(self): + float_data = [1., 2., 3.] + float_types = [np.float, np.float16, np.float32, np.float64] + for t in float_types: + self.assertEqual(as_json_table_type(np.array(float_data, + dtype=t)), + 'number') + + def test_as_json_table_type_bool_data(self): + bool_data = [True, False] + bool_types = [bool, np.bool] + for t in bool_types: + self.assertEqual(as_json_table_type(np.array(bool_data, dtype=t)), + 'boolean') + + def test_as_json_table_type_date_data(self): + date_data = [pd.to_datetime(['2016']), + pd.to_datetime(['2016'], utc=True), + pd.Series(pd.to_datetime(['2016'])), + pd.Series(pd.to_datetime(['2016'], utc=True)), + pd.period_range('2016', freq='A', periods=3)] + for arr in date_data: + self.assertEqual(as_json_table_type(arr), 'datetime') + + def test_as_json_table_type_string_data(self): + strings = [pd.Series(['a', 'b']), pd.Index(['a', 'b'])] + for t in strings: + self.assertEqual(as_json_table_type(t), 'string') + + def test_as_json_table_type_categorical_data(self): + self.assertEqual(as_json_table_type(pd.Categorical(['a'])), 'any') + self.assertEqual(as_json_table_type(pd.Categorical([1])), 'any') + self.assertEqual(as_json_table_type( + pd.Series(pd.Categorical([1]))), 'any') + self.assertEqual(as_json_table_type(pd.CategoricalIndex([1])), 'any') + self.assertEqual(as_json_table_type(pd.Categorical([1])), 'any') + + # ------ + # dtypes + # ------ + def test_as_json_table_type_int_dtypes(self): + integers = [np.int, np.int16, np.int32, np.int64] + for t in integers: + self.assertEqual(as_json_table_type(t), 'integer') + + def test_as_json_table_type_float_dtypes(self): + floats = [np.float, np.float16, np.float32, np.float64] + for t in floats: + self.assertEqual(as_json_table_type(t), 'number') + + def test_as_json_table_type_bool_dtypes(self): + bools = [bool, 
np.bool] + for t in bools: + self.assertEqual(as_json_table_type(t), 'boolean') + + def test_as_json_table_type_date_dtypes(self): + # TODO: datedate.date? datetime.time? + dates = [np.datetime64, np.dtype("<M8[ns]"), PeriodDtype(), + DatetimeTZDtype('ns', 'US/Central')] + for t in dates: + self.assertEqual(as_json_table_type(t), 'datetime') + + def test_as_json_table_type_timedelta_dtypes(self): + durations = [np.timedelta64, np.dtype("<m8[ns]")] + for t in durations: + self.assertEqual(as_json_table_type(t), 'duration') + + def test_as_json_table_type_string_dtypes(self): + strings = [object] # TODO + for t in strings: + self.assertEqual(as_json_table_type(t), 'string') + + def test_as_json_table_type_categorical_dtypes(self): + self.assertEqual(as_json_table_type(pd.Categorical), 'any') + self.assertEqual(as_json_table_type(CategoricalDtype()), 'any') + + +class TestTableOrient(tm.TestCase): + + def setUp(self): + self.df = DataFrame( + {'A': [1, 2, 3, 4], + 'B': ['a', 'b', 'c', 'c'], + 'C': pd.date_range('2016-01-01', freq='d', periods=4), + 'D': pd.timedelta_range('1H', periods=4, freq='T'), + 'E': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'])), + 'F': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'], + ordered=True)), + 'G': [1., 2., 3, 4.], + 'H': pd.date_range('2016-01-01', freq='d', periods=4, + tz='US/Central'), + }, + index=pd.Index(range(4), name='idx')) + + def test_build_series(self): + s = pd.Series([1, 2], name='a') + s.index.name = 'id' + result = s.to_json(orient='table', date_format='iso') + result = json.loads(result, object_pairs_hook=OrderedDict) + + self.assertTrue("pandas_version" in result['schema']) + result['schema'].pop('pandas_version') + + fields = [{'name': 'id', 'type': 'integer'}, + {'name': 'a', 'type': 'integer'}] + + schema = { + 'fields': fields, + 'primaryKey': ['id'], + } + + expected = OrderedDict([ + ('schema', schema), + ('data', [OrderedDict([('id', 0), ('a', 1)]), + OrderedDict([('id', 1), ('a', 2)])])]) + assert result == 
expected + + def test_to_json(self): + df = self.df.copy() + df.index.name = 'idx' + result = df.to_json(orient='table', date_format='iso') + result = json.loads(result, object_pairs_hook=OrderedDict) + + self.assertTrue("pandas_version" in result['schema']) + result['schema'].pop('pandas_version') + + fields = [ + {'name': 'idx', 'type': 'integer'}, + {'name': 'A', 'type': 'integer'}, + {'name': 'B', 'type': 'string'}, + {'name': 'C', 'type': 'datetime'}, + {'name': 'D', 'type': 'duration'}, + {'constraints': {'enum': ['a', 'b', 'c']}, + 'name': 'E', + 'ordered': False, + 'type': 'any'}, + {'constraints': {'enum': ['a', 'b', 'c']}, + 'name': 'F', + 'ordered': True, + 'type': 'any'}, + {'name': 'G', 'type': 'number'}, + {'name': 'H', 'type': 'datetime', 'tz': 'US/Central'} + ] + + schema = { + 'fields': fields, + 'primaryKey': ['idx'], + } + data = [ + OrderedDict([('idx', 0), ('A', 1), ('B', 'a'), + ('C', '2016-01-01T00:00:00.000Z'), + ('D', 'P0DT1H0M0S'), + ('E', 'a'), ('F', 'a'), ('G', 1.), + ('H', '2016-01-01T06:00:00.000Z') + ]), + OrderedDict([('idx', 1), ('A', 2), ('B', 'b'), + ('C', '2016-01-02T00:00:00.000Z'), + ('D', 'P0DT1H1M0S'), + ('E', 'b'), ('F', 'b'), ('G', 2.), + ('H', '2016-01-02T06:00:00.000Z') + ]), + OrderedDict([('idx', 2), ('A', 3), ('B', 'c'), + ('C', '2016-01-03T00:00:00.000Z'), + ('D', 'P0DT1H2M0S'), + ('E', 'c'), ('F', 'c'), ('G', 3.), + ('H', '2016-01-03T06:00:00.000Z') + ]), + OrderedDict([('idx', 3), ('A', 4), ('B', 'c'), + ('C', '2016-01-04T00:00:00.000Z'), + ('D', 'P0DT1H3M0S'), + ('E', 'c'), ('F', 'c'), ('G', 4.), + ('H', '2016-01-04T06:00:00.000Z') + ]), + ] + expected = OrderedDict([('schema', schema), ('data', data)]) + self.assertEqual(result, expected) + + def test_to_json_float_index(self): + data = pd.Series(1, index=[1., 2.]) + result = data.to_json(orient='table', date_format='iso') + result = json.loads(result, object_pairs_hook=OrderedDict) + result['schema'].pop('pandas_version') + + expected = ( + 
OrderedDict([('schema', { + 'fields': [{'name': 'index', 'type': 'number'}, + {'name': 'values', 'type': 'integer'}], + 'primaryKey': ['index'] + }), + ('data', [OrderedDict([('index', 1.0), ('values', 1)]), + OrderedDict([('index', 2.0), ('values', 1)])])]) + ) + self.assertEqual(result, expected) + + def test_to_json_period_index(self): + idx = pd.period_range('2016', freq='Q-JAN', periods=2) + data = pd.Series(1, idx) + result = data.to_json(orient='table', date_format='iso') + result = json.loads(result, object_pairs_hook=OrderedDict) + result['schema'].pop('pandas_version') + + fields = [{'freq': 'Q-JAN', 'name': 'index', 'type': 'datetime'}, + {'name': 'values', 'type': 'integer'}] + + schema = {'fields': fields, 'primaryKey': ['index']} + data = [OrderedDict([('index', '2015-11-01T00:00:00.000Z'), + ('values', 1)]), + OrderedDict([('index', '2016-02-01T00:00:00.000Z'), + ('values', 1)])] + expected = OrderedDict([('schema', schema), ('data', data)]) + self.assertEqual(result, expected) + + def test_to_json_categorical_index(self): + data = pd.Series(1, pd.CategoricalIndex(['a', 'b'])) + result = data.to_json(orient='table', date_format='iso') + result = json.loads(result, object_pairs_hook=OrderedDict) + result['schema'].pop('pandas_version') + + expected = ( + OrderedDict([('schema', + {'fields': [{'name': 'index', 'type': 'any', + 'constraints': {'enum': ['a', 'b']}, + 'ordered': False}, + {'name': 'values', 'type': 'integer'}], + 'primaryKey': ['index']}), + ('data', [ + OrderedDict([('index', 'a'), + ('values', 1)]), + OrderedDict([('index', 'b'), ('values', 1)])])]) + ) + self.assertEqual(result, expected) + + def test_date_format_raises(self): + with tm.assertRaises(ValueError): + self.df.to_json(orient='table', date_format='epoch') + + # others work + self.df.to_json(orient='table', date_format='iso') + self.df.to_json(orient='table') + + def test_make_field_int(self): + data = [1, 2, 3] + kinds = [pd.Series(data, name='name'), pd.Index(data, 
name='name')] + for kind in kinds: + result = make_field(kind) + expected = {"name": "name", "type": 'integer'} + self.assertEqual(result, expected) + + def test_make_field_float(self): + data = [1., 2., 3.] + kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')] + for kind in kinds: + result = make_field(kind) + expected = {"name": "name", "type": 'number'} + self.assertEqual(result, expected) + + def test_make_field_datetime(self): + data = [1., 2., 3.] + kinds = [pd.Series(pd.to_datetime(data), name='values'), + pd.to_datetime(data)] + for kind in kinds: + result = make_field(kind) + expected = {"name": "values", "type": 'datetime'} + self.assertEqual(result, expected) + + kinds = [pd.Series(pd.to_datetime(data, utc=True), name='values'), + pd.to_datetime(data, utc=True)] + for kind in kinds: + result = make_field(kind) + expected = {"name": "values", "type": 'datetime', "tz": "UTC"} + self.assertEqual(result, expected) + + arr = pd.period_range('2016', freq='A-DEC', periods=4) + result = make_field(arr) + expected = {"name": "values", "type": 'datetime', "freq": "A-DEC"} + self.assertEqual(result, expected) + + def test_make_field_categorical(self): + data = ['a', 'b', 'c'] + ordereds = [True, False] + + for ordered in ordereds: + arr = pd.Series(pd.Categorical(data, ordered=ordered), name='cats') + result = make_field(arr) + expected = {"name": "cats", "type": "any", + "constraints": {"enum": data}, + "ordered": ordered} + self.assertEqual(result, expected) + + arr = pd.CategoricalIndex(data, ordered=ordered, name='cats') + result = make_field(arr) + expected = {"name": "cats", "type": "any", + "constraints": {"enum": data}, + "ordered": ordered} + self.assertEqual(result, expected) + + def test_categorical(self): + s = pd.Series(pd.Categorical(['a', 'b', 'a'])) + s.index.name = 'idx' + result = s.to_json(orient='table', date_format='iso') + result = json.loads(result, object_pairs_hook=OrderedDict) + result['schema'].pop('pandas_version') + + 
fields = [{'name': 'idx', 'type': 'integer'}, + {'constraints': {'enum': ['a', 'b']}, + 'name': 'values', + 'ordered': False, + 'type': 'any'}] + + expected = OrderedDict([ + ('schema', {'fields': fields, + 'primaryKey': ['idx']}), + ('data', [OrderedDict([('idx', 0), ('values', 'a')]), + OrderedDict([('idx', 1), ('values', 'b')]), + OrderedDict([('idx', 2), ('values', 'a')])])]) + self.assertEqual(result, expected) + + def test_set_default_names_unset(self): + data = pd.Series(1, pd.Index([1])) + result = set_default_names(data) + self.assertEqual(result.index.name, 'index') + + def test_set_default_names_set(self): + data = pd.Series(1, pd.Index([1], name='myname')) + result = set_default_names(data) + self.assertEqual(result.index.name, 'myname') + + def test_set_default_names_mi_unset(self): + data = pd.Series( + 1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')])) + result = set_default_names(data) + self.assertEqual(result.index.names, ['level_0', 'level_1']) + + def test_set_default_names_mi_set(self): + data = pd.Series( + 1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')], + names=['n1', 'n2'])) + result = set_default_names(data) + self.assertEqual(result.index.names, ['n1', 'n2']) + + def test_set_default_names_mi_partion(self): + data = pd.Series( + 1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')], + names=['n1', None])) + result = set_default_names(data) + self.assertEqual(result.index.names, ['n1', 'level_1']) + + def test_timestamp_in_columns(self): + df = pd.DataFrame([[1, 2]], columns=[pd.Timestamp('2016'), + pd.Timedelta(10, unit='s')]) + result = df.to_json(orient="table") + js = json.loads(result) + assert js['schema']['fields'][1]['name'] == 1451606400000 + assert js['schema']['fields'][2]['name'] == 10000 + + def test_overlapping_names(self): + cases = [ + pd.Series([1], index=pd.Index([1], name='a'), name='a'), + pd.DataFrame({"A": [1]}, index=pd.Index([1], name="A")), + pd.DataFrame({"A": [1]}, index=pd.MultiIndex.from_arrays([ 
+ ['a'], [1] + ], names=["A", "a"])), + ] + + for data in cases: + with pytest.raises(ValueError) as excinfo: + data.to_json(orient='table') + + assert 'Overlapping' in str(excinfo.value) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index e4b10488c69b2..c5e5df9037daa 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -424,6 +424,25 @@ def _skip_if_not_us_locale(): import pytest pytest.skip("Specific locale is set {0}".format(lang)) + +def _skip_if_no_mock(): + try: + import mock # noqa + except ImportError: + try: + from unittest import mock # noqa + except ImportError: + import nose + raise nose.SkipTest("mock is not installed") + + +def _skip_if_no_ipython(): + try: + import IPython # noqa + except ImportError: + import nose + raise nose.SkipTest("IPython not installed") + # ----------------------------------------------------------------------------- # locale utilities
Lays the groundwork for (but doesn't close) https://github.com/pandas-dev/pandas/issues/14386 This handles the schema part of the request there. We'll still need to do the work to publish the data to the frontend, but that can be done as a followup. Usage: ```python In [4]: df = pd.DataFrame( ...: {'A': [1, 2, 3], ...: 'B': ['a', 'b', 'c'], ...: 'C': pd.date_range('2016-01-01', freq='d', periods=3), ...: }, index=pd.Index(range(3), name='idx')) ...: df ...: Out[4]: A B C idx 0 1 a 2016-01-01 1 2 b 2016-01-02 2 3 c 2016-01-03 In [5]: In [5]: pd.to_json_schema(df) Out[5]: {'fields': [{'name': 'idx', 'type': 'integer'}, {'name': 'A', 'type': 'integer'}, {'name': 'B', 'type': 'string'}, {'name': 'C', 'type': 'date'}], 'primary_key': 'idx'} ``` --- I think this is useful enough on its own to be part of the public API, so I've documented as such. I've included a placeholder `publish_tableschema` that will *not* be included in the final commit. It's just to make @rgbkrk's life easier for prototyping the nteract frontend. I think the proper solution for publishing the schema + data will have to wait on https://github.com/ipython/ipython/issues/10090
https://api.github.com/repos/pandas-dev/pandas/pulls/14904
2016-12-17T18:20:08Z
2017-03-04T11:50:05Z
2017-03-04T11:50:05Z
2017-04-05T02:07:22Z
Catch warning introduced by GH14432 in test case
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index d3c7bc2adbb4a..99bea3a10115b 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -349,9 +349,19 @@ def test_groupby_multi_categorical_as_index(self): 'B': [101.0, nan, nan, 205.0, nan, nan]}, columns=['cat', 'A', 'B']) + group_columns = ['cat', 'A'] + for name in [None, 'X', 'B', 'cat']: df.index = Index(list("abc"), name=name) - result = df.groupby(['cat', 'A'], as_index=False).sum() + + if name in group_columns and name in df.index.names: + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = df.groupby(group_columns, as_index=False).sum() + + else: + result = df.groupby(group_columns, as_index=False).sum() + tm.assert_frame_equal(result, expected, check_index_type=True) def test_groupby_preserve_categorical_dtype(self):
Follow on to #14432 to catch the newly introduced `FutureWarning` in the `test_groupby_multi_categorical_as_index` test case.
https://api.github.com/repos/pandas-dev/pandas/pulls/14902
2016-12-17T15:47:33Z
2016-12-17T23:03:34Z
null
2016-12-17T23:04:24Z
BLD: swap 3.6-dev and 3.4 builds, reorg build order
diff --git a/.travis.yml b/.travis.yml index be167451f3460..3e24f3798ca04 100644 --- a/.travis.yml +++ b/.travis.yml @@ -66,19 +66,6 @@ matrix: apt: packages: - python-gtk2 - - python: 3.4 - env: - - PYTHON_VERSION=3.4 - - JOB_NAME: "34_nslow" - - NOSE_ARGS="not slow and not disabled" - - FULL_DEPS=true - - CLIPBOARD=xsel - - CACHE_NAME="34_nslow" - - USE_CACHE=true - addons: - apt: - packages: - - xsel - python: 3.5 env: - PYTHON_VERSION=3.5 @@ -93,6 +80,33 @@ matrix: apt: packages: - xsel + - python: 3.6-dev + env: + - PYTHON_VERSION=3.6 + - JOB_NAME: "36_dev" + - JOB_TAG=_DEV + - NOSE_ARGS="not slow and not network and not disabled" + - PANDAS_TESTING_MODE="deprecate" + addons: + apt: + packages: + - libatlas-base-dev + - gfortran +# In allow_failures + - python: 2.7 + env: + - PYTHON_VERSION=2.7 + - JOB_NAME: "27_nslow_nnet_COMPAT" + - NOSE_ARGS="not slow and not network and not disabled" + - LOCALE_OVERRIDE="it_IT.UTF-8" + - INSTALL_TEST=true + - JOB_TAG=_COMPAT + - CACHE_NAME="27_nslow_nnet_COMPAT" + - USE_CACHE=true + addons: + apt: + packages: + - language-pack-it # In allow_failures - python: 2.7 env: @@ -103,45 +117,46 @@ matrix: - FULL_DEPS=true - CACHE_NAME="27_slow" - USE_CACHE=true +# In allow_failures + - python: 2.7 + env: + - PYTHON_VERSION=2.7 + - JOB_NAME: "27_build_test_conda" + - JOB_TAG=_BUILD_TEST + - NOSE_ARGS="not slow and not disabled" + - FULL_DEPS=true + - BUILD_TEST=true + - CACHE_NAME="27_build_test_conda" + - USE_CACHE=true # In allow_failures - python: 3.4 env: - PYTHON_VERSION=3.4 - - JOB_NAME: "34_slow" - - JOB_TAG=_SLOW - - NOSE_ARGS="slow and not network and not disabled" + - JOB_NAME: "34_nslow" + - NOSE_ARGS="not slow and not disabled" - FULL_DEPS=true - CLIPBOARD=xsel - - CACHE_NAME="34_slow" + - CACHE_NAME="34_nslow" - USE_CACHE=true addons: apt: packages: - xsel # In allow_failures - - python: 2.7 + - python: 3.4 env: - - PYTHON_VERSION=2.7 - - JOB_NAME: "27_build_test_conda" - - JOB_TAG=_BUILD_TEST - - NOSE_ARGS="not slow 
and not disabled" + - PYTHON_VERSION=3.4 + - JOB_NAME: "34_slow" + - JOB_TAG=_SLOW + - NOSE_ARGS="slow and not network and not disabled" - FULL_DEPS=true - - BUILD_TEST=true - - CACHE_NAME="27_build_test_conda" + - CLIPBOARD=xsel + - CACHE_NAME="34_slow" - USE_CACHE=true -# In allow_failures - - python: 3.6-dev - env: - - PYTHON_VERSION=3.6 - - JOB_NAME: "36_dev" - - JOB_TAG=_DEV - - NOSE_ARGS="not slow and not network and not disabled" - - PANDAS_TESTING_MODE="deprecate" addons: apt: packages: - - libatlas-base-dev - - gfortran + - xsel # In allow_failures - python: 3.5 env: @@ -157,21 +172,6 @@ matrix: packages: - libatlas-base-dev - gfortran -# In allow_failures - - python: 2.7 - env: - - PYTHON_VERSION=2.7 - - JOB_NAME: "27_nslow_nnet_COMPAT" - - NOSE_ARGS="not slow and not network and not disabled" - - LOCALE_OVERRIDE="it_IT.UTF-8" - - INSTALL_TEST=true - - JOB_TAG=_COMPAT - - CACHE_NAME="27_nslow_nnet_COMPAT" - - USE_CACHE=true - addons: - apt: - packages: - - language-pack-it # In allow_failures - python: 3.5 env: @@ -226,18 +226,19 @@ matrix: - BUILD_TEST=true - CACHE_NAME="27_build_test_conda" - USE_CACHE=true - - python: 3.6-dev + - python: 3.4 env: - - PYTHON_VERSION=3.6 - - JOB_NAME: "36_dev" - - JOB_TAG=_DEV - - NOSE_ARGS="not slow and not network and not disabled" - - PANDAS_TESTING_MODE="deprecate" + - PYTHON_VERSION=3.4 + - JOB_NAME: "34_nslow" + - NOSE_ARGS="not slow and not disabled" + - FULL_DEPS=true + - CLIPBOARD=xsel + - CACHE_NAME="34_nslow" + - USE_CACHE=true addons: apt: packages: - - libatlas-base-dev - - gfortran + - xsel - python: 3.5 env: - PYTHON_VERSION=3.5
https://api.github.com/repos/pandas-dev/pandas/pulls/14899
2016-12-16T13:35:42Z
2016-12-16T13:35:47Z
2016-12-16T13:35:47Z
2016-12-16T13:35:47Z