title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
DOC: update pandas.DataFrame.boxplot docstring. Fixes #8847
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 6c3d07124215b..b8485eed758a1 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -1995,50 +1995,164 @@ def plot_series(data, kind='line', ax=None, # Series unique _shared_docs['boxplot'] = """ - Make a box plot from DataFrame column optionally grouped by some columns or - other inputs + Make a box plot from DataFrame columns. + + Make a box-and-whisker plot from DataFrame columns, optionally grouped + by some other columns. A box plot is a method for graphically depicting + groups of numerical data through their quartiles. + The box extends from the Q1 to Q3 quartile values of the data, + with a line at the median (Q2). The whiskers extend from the edges + of box to show the range of the data. The position of the whiskers + is set by default to `1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box. + Outlier points are those past the end of the whiskers. + + For further details see + Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_. Parameters ---------- - data : the pandas object holding the data - column : column name or list of names, or vector - Can be any valid input to groupby - by : string or sequence - Column in the DataFrame to group by - ax : Matplotlib axes object, optional - fontsize : int or string - rot : label rotation angle + column : str or list of str, optional + Column name or list of names, or vector. + Can be any valid input to :meth:`pandas.DataFrame.groupby`. + by : str or array-like, optional + Column in the DataFrame to :meth:`pandas.DataFrame.groupby`. + One box-plot will be done per value of columns in `by`. + ax : object of class matplotlib.axes.Axes, optional + The matplotlib axes to be used by boxplot. + fontsize : float or str + Tick label font size in points or as a string (e.g., `large`). + rot : int or float, default 0 + The rotation angle of labels (in degrees) + with respect to the screen coordinate sytem. 
+ grid : boolean, default True + Setting this to True will show the grid. figsize : A tuple (width, height) in inches - grid : Setting this to True will show the grid - layout : tuple (optional) - (rows, columns) for the layout of the plot - return_type : {None, 'axes', 'dict', 'both'}, default None - The kind of object to return. The default is ``axes`` - 'axes' returns the matplotlib axes the boxplot is drawn on; - 'dict' returns a dictionary whose values are the matplotlib - Lines of the boxplot; - 'both' returns a namedtuple with the axes and dict. - - When grouping with ``by``, a Series mapping columns to ``return_type`` - is returned, unless ``return_type`` is None, in which case a NumPy - array of axes is returned with the same shape as ``layout``. - See the prose documentation for more. - - `**kwds` : Keyword Arguments + The size of the figure to create in matplotlib. + layout : tuple (rows, columns), optional + For example, (3, 5) will display the subplots + using 3 columns and 5 rows, starting from the top-left. + return_type : {'axes', 'dict', 'both'} or None, default 'axes' + The kind of object to return. The default is ``axes``. + + * 'axes' returns the matplotlib axes the boxplot is drawn on. + * 'dict' returns a dictionary whose values are the matplotlib + Lines of the boxplot. + * 'both' returns a namedtuple with the axes and dict. + * when grouping with ``by``, a Series mapping columns to + ``return_type`` is returned. + + If ``return_type`` is `None`, a NumPy array + of axes with the same shape as ``layout`` is returned. + **kwds All other plotting keyword arguments to be passed to - matplotlib's boxplot function + :func:`matplotlib.pyplot.boxplot`. 
Returns ------- - lines : dict - ax : matplotlib Axes - (ax, lines): namedtuple + result : + + The return type depends on the `return_type` parameter: + + * 'axes' : object of class matplotlib.axes.Axes + * 'dict' : dict of matplotlib.lines.Line2D objects + * 'both' : a nametuple with strucure (ax, lines) + + For data grouped with ``by``: + + * :class:`~pandas.Series` + * :class:`~numpy.array` (for ``return_type = None``) + + See Also + -------- + Series.plot.hist: Make a histogram. + matplotlib.pyplot.boxplot : Matplotlib equivalent plot. Notes ----- Use ``return_type='dict'`` when you want to tweak the appearance of the lines after plotting. In this case a dict containing the Lines making up the boxes, caps, fliers, medians, and whiskers is returned. + + Examples + -------- + + Boxplots can be created for every column in the dataframe + by ``df.boxplot()`` or indicating the columns to be used: + + .. plot:: + :context: close-figs + + >>> np.random.seed(1234) + >>> df = pd.DataFrame(np.random.randn(10,4), + ... columns=['Col1', 'Col2', 'Col3', 'Col4']) + >>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3']) + + Boxplots of variables distributions grouped by the values of a third + variable can be created using the option ``by``. For instance: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame(np.random.randn(10, 2), + ... columns=['Col1', 'Col2']) + >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', + ... 'B', 'B', 'B', 'B', 'B']) + >>> boxplot = df.boxplot(by='X') + + A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot + in order to group the data by combination of the variables in the x-axis: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame(np.random.randn(10,3), + ... columns=['Col1', 'Col2', 'Col3']) + >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', + ... 'B', 'B', 'B', 'B', 'B']) + >>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A', + ... 
'B', 'A', 'B', 'A', 'B']) + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y']) + + The layout of boxplot can be adjusted giving a tuple to ``layout``: + + .. plot:: + :context: close-figs + + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', + ... layout=(2, 1)) + + Additional formatting can be done to the boxplot, like suppressing the grid + (``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``) + or changing the fontsize (i.e. ``fontsize=15``): + + .. plot:: + :context: close-figs + + >>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15) + + The parameter ``return_type`` can be used to select the type of element + returned by `boxplot`. When ``return_type='axes'`` is selected, + the matplotlib axes on which the boxplot is drawn are returned: + + >>> boxplot = df.boxplot(column=['Col1','Col2'], return_type='axes') + >>> type(boxplot) + <class 'matplotlib.axes._subplots.AxesSubplot'> + + When grouping with ``by``, a Series mapping columns to ``return_type`` + is returned: + + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', + ... return_type='axes') + >>> type(boxplot) + <class 'pandas.core.series.Series'> + + If ``return_type`` is `None`, a NumPy array of axes with the same shape + as ``layout`` is returned: + + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', + ... return_type=None) + >>> type(boxplot) + <class 'numpy.ndarray'> """
- [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py pandas.DataFrame.boxplot` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single pandas.DataFrame.boxplot` - [x] It has been proofread on language by another sprint participant ``` ################################################################################ ##################### Docstring (pandas.DataFrame.boxplot) ##################### ################################################################################ Make a box plot from DataFrame columns. Make a box-and-whisker plot from DataFrame columns optionally grouped by some other columns. A box plot is a method for graphically depicting groups of numerical data through their quartiles. The box extends from the Q1 to Q3 quartile values of the data, with a line at the median (Q2).The whiskers extend from the edges of box to show the range of the data. The position of the whiskers is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the box. Outlier points are those past the end of the whiskers. For further details see Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_. Parameters ---------- column : str or list of str, optional Column name or list of names, or vector. Can be any valid input to groupby. by : str or array-like Column in the DataFrame to groupby. ax : object of class matplotlib.axes.Axes, default `None` The matplotlib axes to be used by boxplot. fontsize : float or str Tick label font size in points or as a string (e.g., ‘large’) (see `matplotlib.axes.Axes.tick_params <https://matplotlib.org/api/_as_gen/ matplotlib.axes.Axes.tick_params.html>`_). rot : int or float, default 0 The rotation angle of labels (in degrees) with respect to the screen coordinate sytem. 
grid : boolean, default `True` Setting this to True will show the grid. figsize : A tuple (width, height) in inches The size of the figure to create in matplotlib. layout : tuple (rows, columns) (optional) For example, (3, 5) will display the subplots using 3 columns and 5 rows, starting from the top-left. return_type : {None, 'axes', 'dict', 'both'}, default 'axes' The kind of object to return. The default is ``axes``. * 'axes' returns the matplotlib axes the boxplot is drawn on. * 'dict' returns a dictionary whose values are the matplotlib Lines of the boxplot. * 'both' returns a namedtuple with the axes and dict. * when grouping with ``by``, a Series mapping columns to ``return_type`` is returned (i.e. ``df.boxplot(column=['Col1','Col2'], by='var',return_type='axes')`` may return ``Series([AxesSubplot(..),AxesSubplot(..)], index=['Col1','Col2'])``). If ``return_type`` is `None`, a NumPy array of axes with the same shape as ``layout`` is returned (i.e. ``df.boxplot(column=['Col1','Col2'], by='var',return_type=None)`` may return a ``array([<matplotlib.axes._subplots.AxesSubplot object at ..>, <matplotlib.axes._subplots.AxesSubplot object at ..>], dtype=object)``). **kwds : Keyword Arguments (optional) All other plotting keyword arguments to be passed to `matplotlib.pyplot.boxplot <https://matplotlib.org/api/_as_gen/ matplotlib.pyplot.boxplot.html#matplotlib.pyplot.boxplot>`_. Returns ------- result: Options: * ax : object of class matplotlib.axes.Axes (for ``return_type='axes'``) * lines : dict (for ``return_type='dict'``) * (ax, lines): namedtuple (for ``return_type='both'``) * :class:`~pandas.Series` (for ``return_type != None`` and data grouped with ``by``) * :class:`~numpy.array` (for ``return_type=None`` and data grouped with ``by``) See Also -------- matplotlib.pyplot.boxplot: Make a box and whisker plot. matplotlib.pyplot.hist: Make a hsitogram. Notes ----- Use ``return_type='dict'`` when you want to tweak the appearance of the lines after plotting. 
In this case a dict containing the Lines making up the boxes, caps, fliers, medians, and whiskers is returned. Examples -------- Boxplots can be created for every column in the dataframe by ``df.boxplot()`` or indicating the columns to be used: .. plot:: :context: close-figs >>> np.random.seed(1234) >>> df = pd.DataFrame(np.random.rand(10,4), ... columns=['Col1', 'Col2', 'Col3', 'Col4']) >>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3']) Boxplots of variables distributions grouped by a third variable values can be created using the option ``by``. For instance: .. plot:: :context: close-figs >>> df = pd.DataFrame(np.random.rand(10,2), columns=['Col1', 'Col2'] ) >>> df['X'] = pd.Series(['A','A','A','A','A','B','B','B','B','B']) >>> boxplot = df.boxplot(by='X') A list of strings (i.e. ``['X','Y']``) containing can be passed to boxplot in order to group the data by combination of the variables in the x-axis: .. plot:: :context: close-figs >>> df = pd.DataFrame(np.random.rand(10,3), ... columns=['Col1', 'Col2', 'Col3']) >>> df['X'] = pd.Series(['A','A','A','A','A','B','B','B','B','B']) >>> df['Y'] = pd.Series(['A','B','A','B','A','B','A','B','A','B']) >>> boxplot = df.boxplot(column=['Col1','Col2'], by=['X','Y']) The layout of boxplot can be adjusted giving a tuple to ``layout``: .. plot:: :context: close-figs >>> df = pd.DataFrame(np.random.rand(10,2), columns=['Col1', 'Col2']) >>> df['X'] = pd.Series(['A','A','A','A','A','B','B','B','B','B']) >>> boxplot = df.boxplot(by='X', layout=(2,1)) Additional formatting can be done to the boxplot, like suppressing the grid (``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``) or changing the fontsize (i.e. ``fontsize=15``): .. 
plot:: :context: close-figs >>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15) ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Errors in parameters section Parameters {'kwds'} not documented Unknown parameters {'**kwds'} ``` <img width="507" alt="captura de pantalla 2018-03-12 a las 0 51 51" src="https://user-images.githubusercontent.com/15342068/37260125-ab870588-258f-11e8-837d-80497fb392eb.png"> <img width="496" alt="captura de pantalla 2018-03-12 a las 0 52 04" src="https://user-images.githubusercontent.com/15342068/37260124-ab4d8d3a-258f-11e8-9dd9-9c5c7dca16cf.png"> <img width="492" alt="captura de pantalla 2018-03-12 a las 0 52 12" src="https://user-images.githubusercontent.com/15342068/37260123-ab316ed4-258f-11e8-863a-2c9e8fe1e67d.png"> <img width="494" alt="captura de pantalla 2018-03-12 a las 0 52 18" src="https://user-images.githubusercontent.com/15342068/37260122-ab1480e4-258f-11e8-9b2b-debe1c7355da.png"> @EliosMolina
https://api.github.com/repos/pandas-dev/pandas/pulls/20152
2018-03-10T14:26:46Z
2018-04-03T07:02:31Z
2018-04-03T07:02:31Z
2018-04-03T18:36:35Z
DOC: update the pandas.Series.dt.is_leap_year docstring
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 4baae4f0e7f1a..ac7db1dbc4b8f 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1945,7 +1945,43 @@ def freq(self, value): is_leap_year = _field_accessor( 'is_leap_year', 'is_leap_year', - "Logical indicating if the date belongs to a leap year") + """ + Boolean indicator if the date belongs to a leap year. + + A leap year is a year, which has 366 days (instead of 365) including + 29th of February as an intercalary day. + Leap years are years which are multiples of four with the exception + of years divisible by 100 but not by 400. + + Returns + ------- + Series or ndarray + Booleans indicating if dates belong to a leap year. + + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y") + >>> idx + DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'], + dtype='datetime64[ns]', freq='A-DEC') + >>> idx.is_leap_year + array([ True, False, False], dtype=bool) + + >>> dates = pd.Series(idx) + >>> dates_series + 0 2012-12-31 + 1 2013-12-31 + 2 2014-12-31 + dtype: datetime64[ns] + >>> dates_series.dt.is_leap_year + 0 True + 1 False + 2 False + dtype: bool + """) @property def time(self):
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x ] PR title is "DOC: update the <your-function-or-method> docstring" - [x ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x ] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x ] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x ] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################## Docstring (pandas.Series.dt.is_leap_year) ################## ################################################################################ Return a boolean indicating if the date belongs to a leap year. A leap year is a year, occurring every four years, which has 366 days (instead of 365) including 29th of February as an intercalary day. Returns ------- is_leap_year : Series of boolean Examples -------- >>> import pandas as pd >>> dates = pd.date_range("2012-01-01", "2015-01-01", freq="Y") >>> dates_series = pd.Series(dates) >>> dates_series 0 2012-12-31 1 2013-12-31 2 2014-12-31 dtype: datetime64[ns] >>> dates_series.dt.is_leap_year 0 True 1 False 2 False dtype: bool ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: See Also section not found ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. See Also is not relevant for this function as there is no other leap year related functions.
https://api.github.com/repos/pandas-dev/pandas/pulls/20150
2018-03-10T14:23:37Z
2018-03-17T11:31:06Z
2018-03-17T11:31:06Z
2018-03-17T11:31:14Z
DOC: update the to_json() docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..bc5b77a8c19cb 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1664,9 +1664,11 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None, Parameters ---------- - path_or_buf : the path or buffer to write the result string - if this is None, return the converted string + path_or_buf : string or file handle, optional + File path or object. If not specified, the result is returned as + a string. orient : string + Indication of expected JSON string format. * Series @@ -1681,27 +1683,29 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None, * The format of the JSON string - - split : dict like - {index -> [index], columns -> [columns], data -> [values]} - - records : list like + - 'split' : dict like {'index' -> [index], + 'columns' -> [columns], 'data' -> [values]} + - 'records' : list like [{column -> value}, ... , {column -> value}] - - index : dict like {index -> {column -> value}} - - columns : dict like {column -> {index -> value}} - - values : just the values array - - table : dict like {'schema': {schema}, 'data': {data}} + - 'index' : dict like {index -> {column -> value}} + - 'columns' : dict like {column -> {index -> value}} + - 'values' : just the values array + - 'table' : dict like {'schema': {schema}, 'data': {data}} describing the data, and the data component is like ``orient='records'``. .. versionchanged:: 0.20.0 date_format : {None, 'epoch', 'iso'} - Type of date conversion. `epoch` = epoch milliseconds, - `iso` = ISO8601. The default depends on the `orient`. For - `orient='table'`, the default is `'iso'`. For all other orients, - the default is `'epoch'`. - double_precision : The number of decimal places to use when encoding - floating point values, default 10. - force_ascii : force encoded string to be ASCII, default True. + Type of date conversion. 'epoch' = epoch milliseconds, + 'iso' = ISO8601. 
The default depends on the `orient`. For + ``orient='table'``, the default is 'iso'. For all other orients, + the default is 'epoch'. + double_precision : int, default 10 + The number of decimal places to use when encoding + floating point values. + force_ascii : boolean, default True + Force encoded string to be ASCII. date_unit : string, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, @@ -1730,13 +1734,9 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None, .. versionadded:: 0.23.0 - Returns - ------- - same type as input object with filtered info axis - See Also -------- - pd.read_json + pandas.read_json Examples -------- @@ -1749,16 +1749,26 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None, "index":["row 1","row 2"], "data":[["a","b"],["c","d"]]}' + Encoding/decoding a Dataframe using ``'records'`` formatted JSON. + Note that index labels are not preserved with this encoding. + + >>> df.to_json(orient='records') + '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]' + Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> df.to_json(orient='index') '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}' - Encoding/decoding a Dataframe using ``'records'`` formatted JSON. - Note that index labels are not preserved with this encoding. + Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: - >>> df.to_json(orient='records') - '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]' + >>> df.to_json(orient='columns') + '{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}' + + Encoding/decoding a Dataframe using ``'values'`` formatted JSON: + + >>> df.to_json(orient='values') + '[["a","b"],["c","d"]]' Encoding with Table Schema
Fixes validation errors and adds an example for each orientation. - [ x] PR title is "DOC: update the <your-function-or-method> docstring" - [ x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [ x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x ] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x ] It has been proofread on language by another sprint participant ``` Errors found: Missing description for See Also "pandas.read_json" reference Examples do not pass tests ``` I'm not able to make this pass.
https://api.github.com/repos/pandas-dev/pandas/pulls/20149
2018-03-10T14:22:48Z
2018-03-13T21:27:21Z
2018-03-13T21:27:21Z
2018-03-13T21:27:45Z
DOC: Improved the docstring of pandas.Series.filter
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f33b5746a29d3..c1964025eff26 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3924,14 +3924,14 @@ def filter(self, items=None, like=None, regex=None, axis=None): Parameters ---------- items : list-like - List of info axis to restrict to (must not all be present) + List of axis to restrict to (must not all be present). like : string - Keep info axis where "arg in col == True" + Keep axis where "arg in col == True". regex : string (regular expression) - Keep info axis with re.search(regex, col) == True + Keep axis with re.search(regex, col) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, - 'index' for Series, 'columns' for DataFrame + 'index' for Series, 'columns' for DataFrame. Returns ------- @@ -3939,26 +3939,25 @@ def filter(self, items=None, like=None, regex=None, axis=None): Examples -------- - >>> df - one two three - mouse 1 2 3 - rabbit 4 5 6 + >>> df = pd.DataFrame(np.array(([1,2,3], [4,5,6])), + ... index=['mouse', 'rabbit'], + ... columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) - one three + one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) - one three + one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) - one two three + one two three rabbit 4 5 6 See Also
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ####################### Docstring (pandas.Series.filter) ####################### ################################################################################ Subset rows or columns of dataframe according to labels in the specified index. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like List of info axis to restrict to (must not all be present). like : string Keep info axis where "arg in col == True". regex : string (regular expression) Keep info axis with re.search(regex, col) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object Examples -------- >>> df = pd.DataFrame(np.array(([1,2,3],[4,5,6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 See Also -------- pandas.DataFrame.loc : Purely label-location based indexer for selection by label. 
Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.filter" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint):
https://api.github.com/repos/pandas-dev/pandas/pulls/20148
2018-03-10T14:16:18Z
2018-07-07T20:45:41Z
2018-07-07T20:45:40Z
2018-07-07T20:45:51Z
DOC: Added examples to the IndexOpsmixin.value_counts() docstring
diff --git a/pandas/core/base.py b/pandas/core/base.py index 6625a3bbe97d7..4ea8c43b640fb 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1008,7 +1008,7 @@ def map_f(values, f): def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True): """ - Returns object containing counts of unique values. + Return a Series containing counts of unique values. The resulting object will be in descending order so that the first element is the most frequently-occurring element. @@ -1020,18 +1020,69 @@ def value_counts(self, normalize=False, sort=True, ascending=False, If True then the object returned will contain the relative frequencies of the unique values. sort : boolean, default True - Sort by values + Sort by values. ascending : boolean, default False - Sort in ascending order + Sort in ascending order. bins : integer, optional Rather than count values, group them into half-open bins, - a convenience for pd.cut, only works with numeric data + a convenience for ``pd.cut``, only works with numeric data. dropna : boolean, default True Don't include counts of NaN. Returns ------- counts : Series + + See Also + -------- + Series.count: number of non-NA elements in a Series + DataFrame.count: number of non-NA elements in a DataFrame + + Examples + -------- + >>> index = pd.Index([3, 1, 2, 3, 4, np.nan]) + >>> index.value_counts() + 3.0 2 + 4.0 1 + 2.0 1 + 1.0 1 + dtype: int64 + + With `normalize` set to `True`, returns the relative frequency by + dividing all values by the sum of values. + + >>> s = pd.Series([3, 1, 2, 3, 4, np.nan]) + >>> s.value_counts(normalize=True) + 3.0 0.4 + 4.0 0.2 + 2.0 0.2 + 1.0 0.2 + dtype: float64 + + **bins** + + Bins can be useful for going from a continuous variable to a + categorical variable; instead of counting unique + apparitions of values, divide the index in the specified + number of half-open bins. 
+ + >>> s.value_counts(bins=3) + (2.0, 3.0] 2 + (0.996, 2.0] 2 + (3.0, 4.0] 1 + dtype: int64 + + **dropna** + + With `dropna` set to `False` we can also see NaN index values. + + >>> s.value_counts(dropna=False) + 3.0 2 + NaN 1 + 4.0 1 + 2.0 1 + 1.0 1 + dtype: int64 """ from pandas.core.algorithms import value_counts result = value_counts(self, sort=sort, ascending=ascending,
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ############################################################################### #################### Docstring (pandas.Index.value_counts) #################### ################################################################################ Return object containing counts of unique values. The resulting object will be in descending order so that the first element is the most frequently-occurring element. Excludes NA values by default. Parameters ---------- normalize : boolean, default False If True then the object returned will contain the relative frequencies of the unique values. sort : boolean, default True Sort by values. ascending : boolean, default False Sort in ascending order. bins : integer, optional Rather than count values, group them into half-open bins, a convenience for pd.cut, only works with numeric data. dropna : boolean, default True Don't include counts of NaN. Returns ------- counts : Series Examples -------- >>> index = pd.Index([3, 1, 2, 3, 4, np.nan]) >>> index.value_counts() 3.0 2 4.0 1 2.0 1 1.0 1 dtype: int64 With `normalize` set to `True`, returns the relative frequency by dividing all values by the sum of values. 
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan]) >>> s.value_counts(normalize=True) 3.0 0.4 4.0 0.2 2.0 0.2 1.0 0.2 dtype: float64 **bins** Bins can be useful for going from a continuous variable to a categorical variable; instead of counting unique apparitions of values, divide the index in the specified number of half-open bins. >>> s.value_counts(bins=3) (2.0, 3.0] 2 (0.996, 2.0] 2 (3.0, 4.0] 1 dtype: int64 **dropna** With `dropna` set to `False` we can also see NaN index values. >>> s.value_counts(dropna=False) 3.0 2 NaN 1 4.0 1 2.0 1 1.0 1 dtype: int64 ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: See Also section not found ``` The validation is not passing because we lack an see also section, but we think that is not relevant for this method.
https://api.github.com/repos/pandas-dev/pandas/pulls/20145
2018-03-10T14:11:13Z
2018-07-07T16:32:12Z
2018-07-07T16:32:12Z
2022-10-28T10:17:41Z
DOC: update the pandas.date_range() docstring
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e5e9bba269fd4..2de95caafdd6c 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -2202,29 +2202,30 @@ def _generate_regular_range(start, end, periods, offset): def date_range(start=None, end=None, periods=None, freq='D', tz=None, normalize=False, name=None, closed=None, **kwargs): """ - Return a fixed frequency DatetimeIndex, with day (calendar) as the default - frequency + Return a fixed frequency DatetimeIndex. + + The default frequency is day (calendar). Parameters ---------- start : string or datetime-like, default None - Left bound for generating dates + Left bound for generating dates. end : string or datetime-like, default None - Right bound for generating dates + Right bound for generating dates. periods : integer, default None - Number of periods to generate + Number of periods to generate. freq : string or DateOffset, default 'D' (calendar daily) - Frequency strings can have multiples, e.g. '5H' + Frequency strings can have multiples, e.g. '5H'. tz : string, default None Time zone name for returning localized DatetimeIndex, for example - Asia/Hong_Kong + Asia/Hong_Kong. normalize : bool, default False - Normalize start/end dates to midnight before generating date range + Normalize start/end dates to midnight before generating date range. name : string, default None - Name of the resulting DatetimeIndex + Name of the resulting DatetimeIndex. closed : string, default None Make the interval closed with respect to the given frequency to - the 'left', 'right', or both sides (None) + the 'left', 'right', or both sides (None). Notes ----- @@ -2237,6 +2238,22 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None, Returns ------- rng : DatetimeIndex + + See Also + -------- + pandas.period_range : Return a fixed frequency PeriodIndex. + pandas.interval_range : Return a fixed frequency IntervalIndex. 
+ + Examples + -------- + >>> pd.date_range('2018-10-03', periods=2) # doctest: +NORMALIZE_WHITESPACE + DatetimeIndex(['2018-10-03', '2018-10-04'], dtype='datetime64[ns]', + freq='D') + + >>> pd.date_range(start='2018-01-01', end='20180103') + ... # doctest: +NORMALIZE_WHITESPACE + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'], + dtype='datetime64[ns]', freq='D') """ return DatetimeIndex(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name,
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` Errors found: Errors in parameters section Parameters {'kwargs'} not documented ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20143
2018-03-10T14:10:29Z
2018-03-11T11:59:33Z
2018-03-11T11:59:33Z
2018-03-11T15:29:29Z
DOC: Improved the docstring of pandas.Series.dt.to_pytimedelta
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index c5b300848876e..8e61edd6c4029 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -157,6 +157,39 @@ class TimedeltaProperties(Properties): """ def to_pytimedelta(self): + """ + Return an array of native `datetime.timedelta` objects. + + Python's standard `datetime` library uses a different representation + timedelta's. This method converts a Series of pandas Timedeltas + to `datetime.timedelta` format with the same length as the original + Series. + + Returns + ------- + a : numpy.ndarray + 1D array containing data with `datetime.timedelta` type. + + Examples + -------- + >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d')) + >>> s + 0 0 days + 1 1 days + 2 2 days + 3 3 days + 4 4 days + dtype: timedelta64[ns] + + >>> s.dt.to_pytimedelta() + array([datetime.timedelta(0), datetime.timedelta(1), + datetime.timedelta(2), datetime.timedelta(3), + datetime.timedelta(4)], dtype=object) + + See Also + -------- + datetime.timedelta + """ return self._get_values().to_pytimedelta() @property
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X ] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ```################################################################################ ################# Docstring (pandas.Series.dt.to_pytimedelta) ################# ################################################################################ Return an array of the Timedeltas in `datetime.timedelta` format with the same length as the original Series. Returns ------- a : numpy.ndarray 1D array containing data with `datetime.timedelta` type Examples -------- >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d')) >>> s 0 0 days 1 1 days 2 2 days 3 3 days 4 4 days dtype: timedelta64[ns] >>> s.dt.to_pytimedelta() array([datetime.timedelta(0), datetime.timedelta(1), datetime.timedelta(2), datetime.timedelta(3), datetime.timedelta(4)], dtype=object) ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No summary found (a short summary in a single line should be present at the beginning of the docstring) See Also section not found ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. No extended summary needed. 
Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20142
2018-03-10T14:03:36Z
2018-03-17T12:09:50Z
2018-03-17T12:09:50Z
2018-03-17T12:10:11Z
DOC: Improved the docstring of str.extract() (Delhi)
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 93e6f8a53c804..9028ce1a77304 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -840,19 +840,22 @@ def _str_extract_frame(arr, pat, flags=0): def str_extract(arr, pat, flags=0, expand=True): r""" + Extract capture groups in the regex `pat` as columns in a DataFrame. + For each subject string in the Series, extract groups from the - first match of regular expression pat. + first match of regular expression `pat`. Parameters ---------- pat : string - Regular expression pattern with capturing groups + Regular expression pattern with capturing groups. flags : int, default 0 (no flags) - re module flags, e.g. re.IGNORECASE - + ``re`` module flags, e.g. ``re.IGNORECASE``. + See :mod:`re` expand : bool, default True - * If True, return DataFrame. - * If False, return Series/Index/DataFrame. + If True, return DataFrame with one column per capture group. + If False, return a Series/Index if there is one capture group + or DataFrame if there are multiple capture groups. .. versionadded:: 0.18.0 @@ -875,7 +878,7 @@ def str_extract(arr, pat, flags=0, expand=True): A pattern with two groups will return a DataFrame with two columns. Non-matches will be NaN. - >>> s = Series(['a1', 'b2', 'c3']) + >>> s = pd.Series(['a1', 'b2', 'c3']) >>> s.str.extract(r'([ab])(\d)') 0 1 0 a 1 @@ -914,7 +917,6 @@ def str_extract(arr, pat, flags=0, expand=True): 1 2 2 NaN dtype: object - """ if not isinstance(expand, bool): raise ValueError("expand must be True or False")
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ #################### Docstring (pandas.Series.str.extract) #################### ################################################################################ Return the match object corresponding to regex `pat`. For each subject string in the Series, extract groups from the first match of regular expression `pat`. Parameters ---------- pat : string Regular expression pattern with capturing groups. flags : int, default 0 (no flags) Re module flags, e.g. re.IGNORECASE. expand : bool, default True If True, return DataFrame, else return Series/Index/DataFrame. .. versionadded:: 0.18.0. Returns ------- DataFrame with one row for each subject string, and one column for each group. Any capture group names in regular expression pat will be used for column names; otherwise capture group numbers will be used. The dtype of each result column is always object, even when no match is found. If expand=False and pat has only one capture group, then return a Series (if subject is a Series) or Index (if subject is an Index). See Also -------- extractall : returns all matches (not just the first match) Examples -------- A pattern with two groups will return a DataFrame with two columns. Non-matches will be NaN. 
>>> s = pd.Series(['a1', 'b2', 'c3']) >>> s.str.extract(r'([ab])(\d)') 0 1 0 a 1 1 b 2 2 NaN NaN A pattern may contain optional groups. >>> s.str.extract(r'([ab])?(\d)') 0 1 0 a 1 1 b 2 2 NaN 3 Named groups will become column names in the result. >>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)') letter digit 0 a 1 1 b 2 2 NaN NaN A pattern with one group will return a DataFrame with one column if expand=True. >>> s.str.extract(r'[ab](\d)', expand=True) 0 0 1 1 2 2 NaN A pattern with one group will return a Series if expand=False. >>> s.str.extract(r'[ab](\d)', expand=False) 0 1 1 2 2 NaN dtype: object ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.str.extract" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20141
2018-03-10T14:02:51Z
2018-07-07T16:26:11Z
2018-07-07T16:26:11Z
2018-07-07T16:26:22Z
DOC: improved the docstring of pandas.Index.min()
diff --git a/pandas/core/base.py b/pandas/core/base.py index 280b8849792e3..fc6550d45b0c3 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -801,7 +801,35 @@ def argmax(self, axis=None): return nanops.nanargmax(self.values) def min(self): - """ The minimum value of the object """ + """ + Return the minimum value of the Index. + + Returns + ------- + scalar + Minimum value. + + See Also + -------- + Index.max : Return the maximum value of the object. + Series.min : Return the minimum value in a Series. + DataFrame.min : Return the minimum values in a DataFrame. + + Examples + -------- + >>> idx = pd.Index([3, 2, 1]) + >>> idx.min() + 1 + + >>> idx = pd.Index(['c', 'b', 'a']) + >>> idx.min() + 'a' + + For a MultiIndex, the minimum is determined lexicographically. + >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) + >>> idx.min() + ('a', 1) + """ return nanops.nanmin(self.values) def argmin(self, axis=None):
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` Docstring for "pandas.Index.min" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20140
2018-03-10T13:59:36Z
2018-03-16T21:16:29Z
2018-03-16T21:16:29Z
2018-03-16T21:16:47Z
DOC: pandas.DateTimeIndex.to_frame
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b911f62b69e2a..e3c41c4df9a2d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1190,6 +1190,11 @@ def to_frame(self, index=True): DataFrame DataFrame containing the original Index data. + See Also + -------- + Index.to_series : Convert an Index to a Series. + Series.to_frame : Convert Series to DataFrame. + Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [ X] PR title is "DOC: pandas.DateTimeIndex.to_frame" - [X ] The validation script passes: `./scripts/validate_docstrings.py pandas.DatetimeIndex.to_frame` - [ X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X ] The html version looks good: `python doc/make.py --single pandas.DatetimeIndex.to_frame` - [ X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################## Docstring (pandas.DatetimeIndex.to_frame) ################## ################################################################################ Create a DataFrame with a column containing the Index. .. versionadded:: 0.21.0 Parameters ---------- index : boolean, default True Set the index of the returned DataFrame as the original Index. Returns ------- DataFrame DataFrame containing the original Index data. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow Application to datetimes >>> dt = ['04/27/2008 05:28 AM', '04/22/2008 07:19 AM', ... '10/09/2008 01:44 PM', '12/25/2008 09:50 AM', ... '08/19/2008 07:30 PM', '03/07/2008 11:04 PM', ... '12/22/2008 12:08 PM', '07/01/2008 05:36 AM', ... 
'09/06/2008 04:53 AM', '01/23/2008 07:43 PM'] >>> con_dt = pd.to_datetime(dt) >>> df = con_dt.to_frame(index=True) >>> df[0] 2008-04-27 05:28:00 2008-04-27 05:28:00 2008-04-22 07:19:00 2008-04-22 07:19:00 2008-10-09 13:44:00 2008-10-09 13:44:00 2008-12-25 09:50:00 2008-12-25 09:50:00 2008-08-19 19:30:00 2008-08-19 19:30:00 2008-03-07 23:04:00 2008-03-07 23:04:00 2008-12-22 12:08:00 2008-12-22 12:08:00 2008-07-01 05:36:00 2008-07-01 05:36:00 2008-09-06 04:53:00 2008-09-06 04:53:00 2008-01-23 19:43:00 2008-01-23 19:43:00 Name: 0, dtype: datetime64[ns] See Also -------- pandas.Series.to_frame : Convert Series to DataFrame ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DatetimeIndex.to_frame" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20139
2018-03-10T13:57:25Z
2018-03-16T21:22:50Z
2018-03-16T21:22:50Z
2018-03-16T21:22:50Z
DOC: update the pandas.DataFrame.isna and pandas.Series.isna docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 397726181d2fb..5dc17b73b7003 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5515,13 +5515,63 @@ def asof(self, where, subset=None): # Action Methods _shared_docs['isna'] = """ + Detect missing values. + Return a boolean same-sized object indicating if the values are NA. + NA values, such as None or :attr:`numpy.NaN`, gets mapped to True + values. + Everything else gets mapped to False values. Characters such as empty + strings `''` or :attr:`numpy.inf` are not considered NA values + (unless you set ``pandas.options.mode.use_inf_as_na = True``). + + Returns + ------- + %(klass)s + Mask of bool values for each element in %(klass)s that + indicates whether an element is not an NA value. See Also -------- - %(klass)s.notna : boolean inverse of isna %(klass)s.isnull : alias of isna + %(klass)s.notna : boolean inverse of isna + %(klass)s.dropna : omit axes labels with missing values isna : top-level isna + + Examples + -------- + Show which entries in a DataFrame are NA. + + >>> df = pd.DataFrame({'age': [5, 6, np.NaN], + ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'), + ... pd.Timestamp('1940-04-25')], + ... 'name': ['Alfred', 'Batman', ''], + ... 'toy': [None, 'Batmobile', 'Joker']}) + >>> df + age born name toy + 0 5.0 NaT Alfred None + 1 6.0 1939-05-27 Batman Batmobile + 2 NaN 1940-04-25 Joker + + >>> df.isna() + age born name toy + 0 False True False True + 1 False False False False + 2 True False False False + + Show which entries in a Series are NA. + + >>> ser = pd.Series([5, 6, np.NaN]) + >>> ser + 0 5.0 + 1 6.0 + 2 NaN + dtype: float64 + + >>> ser.isna() + 0 False + 1 False + 2 True + dtype: bool """ @Appender(_shared_docs['isna'] % _shared_doc_kwargs)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Two Validations for pandas.DataFrame.isna and pandas.Series.isna (shared docs). ``` ################################################################################ ###################### Docstring (pandas.DataFrame.isna) ###################### ################################################################################ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, get mapped to True values. Everything else get mapped to False values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set :attr:`pandas.options.mode.use_inf_as_na` `= True`). Returns ------- bool of type DataFrame Mask of True/False values for each element in DataFrame that indicates whether an element is an NA value See Also -------- DataFrame.isnull : alias of isna DataFrame.notna : boolean inverse of isna DataFrame.dropna : omit axes labels with missing values isna : top-level isna Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame({'age': [5, 6, np.NaN], ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... 'name': ['Alfred', 'Batman', ''], ... 
'toy': [None, 'Batmobile', 'Joker']}) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DataFrame.isna" correct. :) ``` ``` ################################################################################ ######################## Docstring (pandas.Series.isna) ######################## ################################################################################ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, get mapped to True values. Everything else get mapped to False values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set :attr:`pandas.options.mode.use_inf_as_na` `= True`). Returns ------- bool of type Series Mask of True/False values for each element in Series that indicates whether an element is an NA value See Also -------- Series.isnull : alias of isna Series.notna : boolean inverse of isna Series.dropna : omit axes labels with missing values isna : top-level isna Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame({'age': [5, 6, np.NaN], ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... 'name': ['Alfred', 'Batman', ''], ... 
'toy': [None, 'Batmobile', 'Joker']}) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.isna" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20138
2018-03-10T13:55:00Z
2018-03-13T12:51:09Z
2018-03-13T12:51:09Z
2018-03-13T12:51:24Z
DOC: update pandas.Series.rename_axis
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..e276cc9abfdf2 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -913,20 +913,25 @@ def f(x): rename.__doc__ = _shared_docs['rename'] def rename_axis(self, mapper, axis=0, copy=True, inplace=False): - """Alter the name of the index or columns. + """ + Alter the name of the index or columns. Parameters ---------- mapper : scalar, list-like, optional - Value to set the axis name attribute. - axis : int or string, default 0 + Value to set as the axis name attribute. + axis : {0 or 'index', 1 or 'columns'}, default 0 + The index or the name of the axis. copy : boolean, default True - Also copy underlying data + Also copy underlying data. inplace : boolean, default False + Modifies the object directly, instead of creating a new Series + or DataFrame. Returns ------- - renamed : type of caller or None if inplace=True + renamed : Series, DataFrame, or None + The same type as the caller or None if `inplace` is True. Notes ----- @@ -937,11 +942,23 @@ def rename_axis(self, mapper, axis=0, copy=True, inplace=False): See Also -------- - pandas.Series.rename, pandas.DataFrame.rename - pandas.Index.rename + pandas.Series.rename : Alter Series index labels or name + pandas.DataFrame.rename : Alter DataFrame index labels or name + pandas.Index.rename : Set new names on index Examples -------- + **Series** + + >>> s = pd.Series([1, 2, 3]) + >>> s.rename_axis("foo") + foo + 0 1 + 1 2 + 2 3 + dtype: int64 + + **DataFrame** >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename_axis("foo") @@ -956,7 +973,6 @@ def rename_axis(self, mapper, axis=0, copy=True, inplace=False): 0 1 4 1 2 5 2 3 6 - """ inplace = validate_bool_kwarg(inplace, 'inplace') non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ############# Docstring (pandas.core.generic.NDFrame.rename_axis) ############# ################################################################################ Alter the name of the index or columns. This function alters the name of the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. axis : int, str, default 0 The index or the name of the axis. copy : boolean, default True Also copy underlying data. inplace : boolean, default False Modifies the mapper in place. Returns ------- type of caller or None if inplace=True Renamed Notes ----- Prior to version 0.21.0, ``rename_axis`` could also be used to change the axis *labels* by passing a mapping or scalar. This behavior is deprecated and will be removed in a future version. Use ``rename`` instead. 
See Also -------- pandas.Series.rename : Alter Series index labels or name pandas.DataFrame.rename : Alter DataFrame index labels or name pandas.Index.rename : Set new names on index Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename_axis("foo") A B foo 0 1 4 1 2 5 2 3 6 >>> df.rename_axis("bar", axis="columns") bar A B 0 1 4 1 2 5 2 3 6 ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.core.generic.NDFrame.rename_axis" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20137
2018-03-10T13:51:44Z
2018-03-13T21:22:54Z
2018-03-13T21:22:54Z
2018-03-13T21:22:58Z
DOC: update the DataFrame.applymap docstring
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a66d00fff9714..16575815a91d2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5005,39 +5005,52 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, def applymap(self, func): """ - Apply a function to a DataFrame that is intended to operate - elementwise, i.e. like doing map(func, series) for each series in the - DataFrame + Apply a function to a Dataframe elementwise. + + This method applies a function that accepts and returns a scalar + to every element of a DataFrame. Parameters ---------- - func : function - Python function, returns a single value from a single value - - Examples - -------- - - >>> df = pd.DataFrame(np.random.randn(3, 3)) - >>> df - 0 1 2 - 0 -0.029638 1.081563 1.280300 - 1 0.647747 0.831136 -1.549481 - 2 0.513416 -0.884417 0.195343 - >>> df = df.applymap(lambda x: '%.2f' % x) - >>> df - 0 1 2 - 0 -0.03 1.08 1.28 - 1 0.65 0.83 -1.55 - 2 0.51 -0.88 0.20 + func : callable + Python function, returns a single value from a single value. Returns ------- - applied : DataFrame + DataFrame + Transformed DataFrame. See also -------- - DataFrame.apply : For operations on rows/columns + DataFrame.apply : Apply a function along input axis of DataFrame + + Examples + -------- + >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) + >>> df + 0 1 + 0 1.000 2.120 + 1 3.356 4.567 + + >>> df.applymap(lambda x: len(str(x))) + 0 1 + 0 3 4 + 1 5 5 + + Note that a vectorized version of `func` often exists, which will + be much faster. You could square each number elementwise. + + >>> df.applymap(lambda x: x**2) + 0 1 + 0 1.000000 4.494400 + 1 11.262736 20.857489 + + But it's better to avoid applymap in that case. + >>> df ** 2 + 0 1 + 0 1.000000 4.494400 + 1 11.262736 20.857489 """ # if we have a dtype == 'M8[ns]', provide boxed values
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` python scripts/validate_docstrings.py pandas.DataFrame.applymap ################################################################################ #################### Docstring (pandas.DataFrame.applymap) #################### ################################################################################ Apply a function to a Dataframe elementwise. This method applies a function to a DataFrame that is intended to operate elementwise, i.e. like doing `map(func, series)` for each series in the DataFrame. Parameters ---------- func : function Python function, returns a single value from a single value. Returns ------- pandas.DataFrame A transformed DataFrame. See also -------- DataFrame.apply : Apply a function along input axis of DataFrame Examples -------- >>> df = pd.DataFrame([[1.,2.], [3.,4.]]) >>> df 0 1 0 1.0 2.0 1 3.0 4.0 >>> df = df.applymap(lambda x: x**2) >>> df 0 1 0 1.0 4.0 1 9.0 16.0 ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DataFrame.applymap" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20136
2018-03-10T13:49:17Z
2018-03-16T21:33:16Z
2018-03-16T21:33:16Z
2018-03-16T21:33:16Z
DOC: improved the docstring of pandas.Series.clip_upper improved
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 56c71c6c19670..605db412e5a5f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6547,25 +6547,63 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False, def clip_upper(self, threshold, axis=None, inplace=False): """ - Return copy of input with values above given value(s) truncated. + Return copy of the input with values above given value(s) truncated. + + It truncates values above a certain threshold. Threshold can be a + single value or an array, in the latter case it performs the truncation + element-wise. Parameters ---------- - threshold : float or array_like + threshold : float or array-like + Maximum value allowed. All values above threshold will be set to + this value. axis : int or string axis name, optional Align object with threshold along the given axis. inplace : boolean, default False - Whether to perform the operation in place on the data + Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 See Also -------- - clip + clip : Return input copy with values below/above thresholds truncated. + clip_lower : Method to truncate values below given thresholds. Returns ------- clipped : same type as input + + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4, 5]) + >>> s + 0 1 + 1 2 + 2 3 + 3 4 + 4 5 + dtype: int64 + + >>> s.clip_upper(3) + 0 1 + 1 2 + 2 3 + 3 3 + 4 3 + dtype: int64 + + >>> t = [5, 4, 3, 2, 1] + >>> t + [5, 4, 3, 2, 1] + + >>> s.clip_upper(t) + 0 1 + 1 2 + 2 3 + 3 2 + 4 1 + dtype: int64 """ return self._clip_with_one_bound(threshold, method=self.le, axis=axis, inplace=inplace)
(pandas_dev) C:\Users\Jaime\repos\MadrisSprintPandas\pandas\scripts>python validate_docstrings.py pandas.Series.clip_upper ################################################################################ ##################### Docstring (pandas.Series.clip_upper) ##################### ################################################################################ Return copy of the input with values above given value(s) truncated. It truncates values above a certain threshold. Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : float or array_like Maximum value allowed. All values above threshold will be set to this value. axis : int or string axis name, optional Align object with threshold along the given axis. inplace : boolean, default False Whether to perform the operation in place on the data. See Also -------- clip : Return copy of input with values below/above thresholds truncated. clip_lower : Return copy of input with values below given thresholds. Returns ------- clipped : same type as input Examples -------- >>> s = pd.Series([1,2,3,4,5,6,7]) >>> s 0 1 1 2 2 3 3 4 4 5 5 6 6 7 dtype: int64 >>> s.clip_upper(4) 0 1 1 2 2 3 3 4 4 4 5 4 6 4 dtype: int64 >>> t = [4,8,7,2,5,4,6] >>> t [4, 8, 7, 2, 5, 4, 6] >>> s.clip_upper(t) 0 1 1 2 2 3 3 2 4 5 5 4 6 6 dtype: int64 ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.clip_upper" correct. :)
https://api.github.com/repos/pandas-dev/pandas/pulls/20135
2018-03-10T13:46:47Z
2018-07-08T14:48:25Z
2018-07-08T14:48:25Z
2018-07-08T14:48:29Z
DOC: update the pandas.core.generic.NDFrame.to_clipboard docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..cfb3e2fe85439 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1929,26 +1929,58 @@ def to_pickle(self, path, compression='infer', protocol=protocol) def to_clipboard(self, excel=True, sep=None, **kwargs): - """ - Attempt to write text representation of object to the system clipboard + r""" + Copy object to the system clipboard. + + Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- - excel : boolean, defaults to True - if True, use the provided separator, writing in a csv - format for allowing easy pasting into excel. - if False, write a string representation of the object - to the clipboard - sep : optional, defaults to tab - other keywords are passed to to_csv + excel : bool, default True + - True, use the provided separator, writing in a csv format for + allowing easy pasting into excel. + - False, write a string representation of the object to the + clipboard. + + sep : str, default ``'\t'`` + Field delimiter. + **kwargs + These parameters will be passed to DataFrame.to_csv. + + See Also + -------- + DataFrame.to_csv : Write a DataFrame to a comma-separated values + (csv) file. + read_clipboard : Read text from clipboard and pass to read_table. Notes ----- - Requirements for your platform - - Linux: xclip, or xsel (with gtk or PyQt4 modules) - - Windows: none - - OS X: none + Requirements for your platform. + + - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules) + - Windows : none + - OS X : none + + Examples + -------- + Copy the contents of a DataFrame to the clipboard. + + >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) + >>> df.to_clipboard(sep=',') + ... # Wrote the following to the system clipboard: + ... # ,A,B,C + ... # 0,1,2,3 + ... # 1,4,5,6 + + We can omit the the index by passing the keyword `index` and setting + it to false. 
+ + >>> df.to_clipboard(sep=',', index=False) + ... # Wrote the following to the system clipboard: + ... # A,B,C + ... # 1,2,3 + ... # 4,5,6 """ from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py pandas.core.generic.NDFrame.to_clipboard` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single pandas.core.generic.NDFrame.to_clipboard` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ############# Docstring (pandas.core.generic.NDFrame.to_clipboard) ############# ################################################################################ Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool Default setting for this argument is True. If True, use the provided separator, writing in a csv format for allowing easy pasting into excel. If False, write a string representation of the object to the clipboard. sep : str, default tab Field delimiter. kwargs : optional These parameters will be passed to either :py:meth:`pandas.DataFrame.to_csv` or :py:meth:`pandas.Series.to_csv` methods depending on the Object type. See Also -------- pandas.core.frame.DataFrame.to_csv : Write a DataFrame to a comma-separated values (csv) file. pandas.core.series.Series.to_csv : Write a Series to a comma-separated values (csv) file. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules) - Windows : none - OS X : none Examples -------- Copy the contents of a DataFrame to the clipboard. 
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard() We can omit the the index by passing the keyword 'index' and setting it to false. >>> df.to_clipboard(index=False) ################################################################################ ################################## Validation ################################## ################################################################################ ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20134
2018-03-10T13:45:57Z
2018-03-13T20:49:32Z
2018-03-13T20:49:31Z
2018-03-13T20:49:32Z
DOC: update the pandas.DataFrame.plot.pie docstring
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index cf3ae3c0368d3..da7c58428fb54 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -3133,19 +3133,51 @@ def area(self, x=None, y=None, **kwds): def pie(self, y=None, **kwds): """ - Pie chart + Generate a pie plot. + + A pie plot is a proportional representation of the numerical data in a + column. This function wraps :meth:`matplotlib.pyplot.pie` for the + specified column. If no column reference is passed and + ``subplots=True`` a pie plot is drawn for each numerical column + independently. Parameters ---------- - y : label or position, optional - Column to plot. - `**kwds` : optional - Additional keyword arguments are documented in - :meth:`pandas.DataFrame.plot`. + y : int or label, optional + Label or position of the column to plot. + If not provided, ``subplots=True`` argument must be passed. + **kwds + Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`. Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + axes : matplotlib.axes.Axes or np.ndarray of them. + A NumPy array is returned when `subplots` is True. + + See Also + -------- + Series.plot.pie : Generate a pie plot for a Series. + DataFrame.plot : Make plots of a DataFrame. + + Examples + -------- + In the example below we have a DataFrame with the information about + planet's mass and radius. We pass the the 'mass' column to the + pie function to get a pie plot. + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97], + ... 'radius': [2439.7, 6051.8, 6378.1]}, + ... index=['Mercury', 'Venus', 'Earth']) + >>> plot = df.plot.pie(y='mass', figsize=(5, 5)) + + .. plot:: + :context: close-figs + + >>> plot = df.plot.pie(subplots=True, figsize=(6, 3)) + """ return self(kind='pie', y=y, **kwds)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py pandas.DataFrame.plot.pie` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` Docstring for "pandas.DataFrame.plot.pie" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20133
2018-03-10T13:45:18Z
2018-03-17T09:44:36Z
2018-03-17T09:44:36Z
2019-03-12T10:30:36Z
DOC: update the docstrings of Interval and IntervalMixin
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index f969c5db5b902..5dbf509fda65e 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -20,28 +20,60 @@ cdef class IntervalMixin(object): @property def closed_left(self): """ - Return True if the Interval is closed on the left-side, else False + Check if the interval is closed on the left side. + + For the meaning of `closed` and `open` see :class:`~pandas.Interval`. + + Returns + ------- + bool + ``True`` if the Interval is closed on the left-side, else + ``False``. """ return self.closed in ('left', 'both') @property def closed_right(self): """ - Return True if the Interval is closed on the right-side, else False + Check if the interval is closed on the right side. + + For the meaning of `closed` and `open` see :class:`~pandas.Interval`. + + Returns + ------- + bool + ``True`` if the Interval is closed on the left-side, else + ``False``. """ return self.closed in ('right', 'both') @property def open_left(self): """ - Return True if the Interval is open on the left-side, else False + Check if the interval is open on the left side. + + For the meaning of `closed` and `open` see :class:`~pandas.Interval`. + + Returns + ------- + bool + ``True`` if the Interval is closed on the left-side, else + ``False``. """ return not self.closed_left @property def open_right(self): """ - Return True if the Interval is open on the right-side, else False + Check if the interval is open on the right side. + + For the meaning of `closed` and `open` see :class:`~pandas.Interval`. + + Returns + ------- + bool + ``True`` if the Interval is closed on the left-side, else + ``False``. """ return not self.closed_right @@ -88,12 +120,25 @@ cdef class Interval(IntervalMixin): closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the interval is closed on the left-side, right-side, both or neither. 
+ closed : {'right', 'left', 'both', 'neither'}, default 'right' + Whether the interval is closed on the left-side, right-side, both or + neither. See the Notes for more detailed explanation. Notes ----- The parameters `left` and `right` must be from the same type, you must be able to compare them and they must satisfy ``left <= right``. + A closed interval (in mathematics denoted by square brackets) contains + its endpoints, i.e. the closed interval ``[0, 5]`` is characterized by the + conditions ``0 <= x <= 5``. This is what ``closed='both'`` stands for. + An open interval (in mathematics denoted by parentheses) does not contain + its endpoints, i.e. the open interval ``(0, 5)`` is characterized by the + conditions ``0 < x < 5``. This is what ``closed='neither'`` stands for. + Intervals can also be half-open or half-closed, i.e. ``[0, 5)`` is + described by ``0 <= x < 5`` (``closed='left'``) and ``(0, 5]`` is + described by ``0 < x <= 5`` (``closed='right'``). + Examples -------- It is possible to build Intervals of different types, like numeric ones: @@ -107,12 +152,14 @@ cdef class Interval(IntervalMixin): >>> 2.5 in iv True - You can test the bounds + You can test the bounds (``closed='right'``, so ``0 < x <= 5``): >>> 0 in iv False >>> 5 in iv True + >>> 0.0001 in iv + True Calculate its length @@ -150,9 +197,10 @@ cdef class Interval(IntervalMixin): -------- IntervalIndex : An Index of Interval objects that are all closed on the same side. - cut : Bin values into discrete intervals. - qcut : Discretize values into equal-sized buckets based on rank or - based on sample quantiles. + cut : Convert continuous data into discrete bins (Categorical + of Interval objects). + qcut : Convert continuous data into bins (Categorical of Interval objects) + based on quantiles. Period : Represents a period of time. """ _typ = "interval"
Improved the docstrings of Interval (the class) and the following methods of IntervalMixin: * closed_right * closed_left * open_right * open_left Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ######################### Docstring (pandas.Interval) ######################### ################################################################################ Immutable object implementing an Interval, a bounded slice-like interval. .. versionadded:: 0.20.0 Parameters ---------- left : value Left bound for the interval. right : value Right bound for the interval. closed : {'right', 'left', 'both', 'neither'}, default 'right' Whether the interval is closed on the left-side, right-side, both or neither. A closed interval (in mathematics denoted by square brackets) contains the edge points, i.e. the closed interval `[1, 5]` consists of the points `1, 2, 3, 4, 5`. An open interval (in mathematics denoted by parentheses) does not contain the edge points, i.e. the open interval `(1, 5)` consists of the points 2, 3, 4. Intervals can also be half-open or half closed, i.e. `[1,5) = 1, 2, 3, 4` and `(1, 5] = 2, 3, 4, 5`. See also the examples section below. Examples -------- >>> iv = pd.Interval(left=0, right=5) >>> iv Interval(0, 5, closed='right') >>> 2.5 in iv True >>> 0 in iv False >>> 5 in iv True >>> year_2017 = pd.Interval(pd.Timestamp('2017-01-01'), ... 
pd.Timestamp('2017-12-31'), closed='both') >>> pd.Timestamp('2017-01-01 00:00') in year_2017 True See Also -------- IntervalIndex : An Index of Interval objects that are all closed on the same side. cut : Convert arrays of continuous data into bins of Series/Categoricals of Interval. qcut : Convert arrays of continuous data into bins of Series/Categoricals of Interval based on quantiles. ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Errors in parameters section Unknown parameters {'left', 'right', 'closed'} No returns section found (venv) [akoeltringer@client7 pandas]$ python scripts/validate_docstrings.py pandas.Interval.closed_left ################################################################################ ################### Docstring (pandas.Interval.closed_left) ################### ################################################################################ Check if the interval is closed on the left side. For the meaning of `closed` and `open` see :class:`~pandas.Interval`. Returns ------- bool `True` if the Interval is closed on the left-side, else `False`. 
################################################################################ ################################## Validation ################################## ################################################################################ Errors found: See Also section not found No examples section found (venv) [akoeltringer@client7 pandas]$ python scripts/validate_docstrings.py pandas.Interval.closed_right ################################################################################ ################### Docstring (pandas.Interval.closed_right) ################### ################################################################################ Check if the interval is closed on the right side. For the meaning of `closed` and `open` see :class:`~pandas.Interval`. Returns ------- bool `True` if the Interval is closed on the right-side, else `False`. ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: See Also section not found No examples section found (venv) [akoeltringer@client7 pandas]$ python scripts/validate_docstrings.py pandas.Interval.open_left ################################################################################ #################### Docstring (pandas.Interval.open_left) #################### ################################################################################ Check if the interval is open on the left side. For the meaning of `closed` and `open` see :class:`~pandas.Interval`. Returns ------- bool `True` if the Interval is open on the left-side, else `False`. 
################################################################################ ################################## Validation ################################## ################################################################################ Errors found: See Also section not found No examples section found (venv) [akoeltringer@client7 pandas]$ python scripts/validate_docstrings.py pandas.Interval.open_right ################################################################################ #################### Docstring (pandas.Interval.open_right) #################### ################################################################################ Check if the interval is open on the right side. For the meaning of `closed` and `open` see :class:`~pandas.Interval`. Returns ------- bool `True` if the Interval is open on the right-side, else `False`. ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: See Also section not found No examples section found ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. * It is not clear why the error for ``pandas.Interval`` (``Unknown parameters {'left', 'right', 'closed'}``) arises - respondents to this question on Gitter were not sure either * the ``closed_right``, ``closed_left``, ``open_right`` and ``open_left`` methods have no 'examples' and no 'see also' section because for that the reader is referred to the ``Interval`` class.
https://api.github.com/repos/pandas-dev/pandas/pulls/20132
2018-03-10T13:44:49Z
2018-03-14T14:21:26Z
2018-03-14T14:21:26Z
2018-03-14T14:41:20Z
DOC: update pandas.DatetimeIndex.to_period docstring(Nairobi)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e5e9bba269fd4..8b16391a8e8cd 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1057,7 +1057,47 @@ def to_pydatetime(self): def to_period(self, freq=None): """ - Cast to PeriodIndex at a particular frequency + Cast to PeriodIndex at a particular frequency. + + Converts DatetimeIndex to PeriodIndex. + + Parameters + ---------- + freq : string or Offset, optional + One of pandas' :ref:`offset strings <timeseries.offset_aliases>` + or an Offset object. Will be inferred by default. + + Returns + ------- + PeriodIndex + + Raises + ------ + ValueError + When converting a DatetimeIndex with non-regular values, so that a + frequency cannot be inferred. + + Examples + -------- + >>> df = pd.DataFrame({"y": [1,2,3]}, + ... index=pd.to_datetime(["2000-03-31 00:00:00", + ... "2000-05-31 00:00:00", + ... "2000-08-31 00:00:00"])) + >>> df.index.to_period("M") + PeriodIndex(['2000-03', '2000-05', '2000-08'], + dtype='period[M]', freq='M') + + Infer the daily frequency + + >>> idx = pd.date_range("2017-01-01", periods=2) + >>> idx.to_period() + PeriodIndex(['2017-01-01', '2017-01-02'], + dtype='period[D]', freq='D') + + See also + -------- + pandas.PeriodIndex: Immutable ndarray holding ordinal values + pandas.DatetimeIndex.to_pydatetime: Return DatetimeIndex as object """ from pandas.core.indexes.period import PeriodIndex @@ -1146,17 +1186,17 @@ def union(self, other): def to_perioddelta(self, freq): """ - Calculates TimedeltaIndex of difference between index - values and index converted to PeriodIndex at specified - freq. Used for vectorized offsets + Calculate TimedeltaIndex of difference between index + values and index converted to periodIndex at specified + freq. 
Used for vectorized offsets Parameters ---------- - freq : Period frequency + freq: Period frequency Returns ------- - y : TimedeltaIndex + y: TimedeltaIndex """ return to_timedelta(self.asi8 - self.to_period(freq) .to_timestamp().asi8)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [ x] PR title is "DOC: update the pandas.DatetimeIndex.to_period docstring" - [ x] The validation script passes: `scripts/validate_docstrings.py pandas.DatetimeIndex.to_period ` - [ x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ x] The html version looks good: `python doc/make.py --single pandas.DatetimeIndex.to_period ` - [ x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################## Docstring (pandas.DatetimeIndex.to_period) ################## ################################################################################ Cast to PeriodIndex at a particular frequency. Converts DatetimeIndex to PeriodIndex. Parameters ---------- freq : string or pandas offset object, optional One of pandas date offset string or corresponding objects. Returns ------- period : DatetimeIndex Examples -------- >>> df = pd.DataFrame({"y": [1,2,3]}, ... index=pd.to_datetime(["2000-03-31 00:00:00", ... "2000-05-31 00:00:00", ... "2000-08-31 00:00:00"])) >>> df.index = df.index.to_period("M") >>> df y 2000-03 1 2000-05 2 2000-08 3 See also -------- pandas.PeriodIndex: Immutable ndarray holding ordinal values pandas.DatetimeIndex.to_pydatetime: Return DatetimeIndex as object ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DatetimeIndex.to_period" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20131
2018-03-10T13:44:23Z
2018-03-12T16:18:49Z
2018-03-12T16:18:49Z
2018-03-12T16:18:50Z
DOC: Docstring for pandas.index.max
diff --git a/pandas/core/base.py b/pandas/core/base.py index fd039480fc6f1..9b50687edecd8 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -787,7 +787,36 @@ def empty(self): return not self.size def max(self): - """ The maximum value of the object """ + """ + Return the maximum value of the Index. + + Returns + ------- + scalar + Maximum value. + + See Also + -------- + Index.min : Return the minimum value in an Index. + Series.max : Return the maximum value in a Series. + DataFrame.max : Return the maximum values in a DataFrame. + + Examples + -------- + >>> idx = pd.Index([3, 2, 1]) + >>> idx.max() + 3 + + >>> idx = pd.Index(['c', 'b', 'a']) + >>> idx.max() + 'c' + + For a MultiIndex, the maximum is determined lexicographically. + + >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) + >>> idx.max() + ('b', 2) + """ return nanops.nanmax(self.values) def argmax(self, axis=None):
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ######################### Docstring (pandas.Index.max) ######################### ################################################################################ Return the maximum value of the object. Return the maximum value of the object within the same type. Returns ------- scalar Maximum value. See Also -------- Series.max : Return the maximum value of the object. DataFrame.max : Return the maximum value of the object. Index.min : Return the minimum value of the object. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.max() 3 >>> idx = pd.Index(['c', 'b', 'a']) >>> idx.max() 'c' ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Index.max" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20130
2018-03-10T13:38:00Z
2018-03-16T21:12:46Z
2018-03-16T21:12:46Z
2018-03-17T18:12:03Z
DOC: update the parquet docstring
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a66d00fff9714..8151d6ca3b193 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1697,10 +1697,15 @@ def to_parquet(self, fname, engine='auto', compression='snappy', .. versionadded:: 0.21.0 + This function writes the dataframe as a `parquet file + <https://parquet.apache.org/>`_. You can choose different parquet + backends, and have the option of compression. See + :ref:`the user guide <io.parquet>` for more details. + Parameters ---------- fname : str - string file path + String file path. engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` @@ -1708,8 +1713,31 @@ def to_parquet(self, fname, engine='auto', compression='snappy', 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. - kwargs - Additional keyword arguments passed to the engine + **kwargs + Additional arguments passed to the parquet library. See + :ref:`pandas io <io.parquet>` for more details. + + See Also + -------- + read_parquet : Read a parquet file. + DataFrame.to_csv : Write a csv file. + DataFrame.to_sql : Write to a sql table. + DataFrame.to_hdf : Write to hdf. + + Notes + ----- + This function requires either the `fastparquet + <https://pypi.python.org/pypi/fastparquet>`_ or `pyarrow + <https://arrow.apache.org/docs/python/>`_ library. + + Examples + -------- + >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}) + >>> df.to_parquet('df.parquet.gzip', compression='gzip') + >>> pd.read_parquet('df.parquet.gzip') + col1 col2 + 0 1 3 + 1 2 4 """ from pandas.io.parquet import to_parquet to_parquet(self, fname, engine,
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################### Docstring (pandas.DataFrame.to_parquet) ################### ################################################################################ Write a DataFrame to the binary parquet format. .. versionadded:: 0.21.0 Requires either fastparquet or pyarrow libraries. Parameters ---------- fname : str String file path. engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. kwargs : dict Additional keyword arguments passed to the engine. Examples ---------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}) >>> df.to_parquet('df.parquet.gzip', compression='gzip') Returns ---------- None See Also -------- DataFrame.to_csv : write a csv file. DataFrame.to_sql : write to a sql table. DataFrame.to_hdf : write to hdf. 
################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DataFrame.to_parquet" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20129
2018-03-10T13:36:43Z
2018-03-12T11:03:35Z
2018-03-12T11:03:35Z
2018-03-12T11:04:02Z
DOC: updated the examples in date_range function of Pandas
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index b9c4b59536d0c..3fc9478ed457d 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -2206,36 +2206,34 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None, """ Return a fixed frequency DatetimeIndex. - The default frequency is day (calendar). + Exactly two of the three parameters `start`, `end` and `periods` + must be specified. Parameters ---------- - start : string or datetime-like, default None + start : str or datetime-like, optional Left bound for generating dates. - end : string or datetime-like, default None + end : str or datetime-like, optional Right bound for generating dates. - periods : integer, default None + periods : integer, optional Number of periods to generate. - freq : string or DateOffset, default 'D' (calendar daily) - Frequency strings can have multiples, e.g. '5H'. - tz : string, default None + freq : str or DateOffset, default 'D' (calendar daily) + Frequency strings can have multiples, e.g. '5H'. See + :ref:`here <timeseries.offset_aliases>` for a list of + frequency aliases. + tz : str or tzinfo, optional Time zone name for returning localized DatetimeIndex, for example - Asia/Hong_Kong. + 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is + timezone-naive. normalize : bool, default False Normalize start/end dates to midnight before generating date range. - name : string, default None + name : str, default None Name of the resulting DatetimeIndex. - closed : string, default None + closed : {None, 'left', 'right'}, optional Make the interval closed with respect to the given frequency to - the 'left', 'right', or both sides (None). - - Notes - ----- - Of the three parameters: ``start``, ``end``, and ``periods``, exactly two - must be specified. - - To learn more about the frequency strings, please see `this link - <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. 
+ the 'left', 'right', or both sides (None, the default). + **kwargs + For compatibility. Has no effect on the result. Returns ------- @@ -2243,19 +2241,87 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None, See Also -------- + pandas.DatetimeIndex : An immutable container for datetimes. pandas.period_range : Return a fixed frequency PeriodIndex. pandas.interval_range : Return a fixed frequency IntervalIndex. Examples -------- - >>> pd.date_range('2018-10-03', periods=2) # doctest: +NORMALIZE_WHITESPACE - DatetimeIndex(['2018-10-03', '2018-10-04'], dtype='datetime64[ns]', - freq='D') - - >>> pd.date_range(start='2018-01-01', end='20180103') - ... # doctest: +NORMALIZE_WHITESPACE - DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'], - dtype='datetime64[ns]', freq='D') + **Specifying the values** + + The next three examples generate the same `DatetimeIndex`, but vary + the combination of `start`, `end` and `periods`. + + Specify `start` and `end`, with the default daily frequency. + + >>> pd.date_range(start='1/1/2018', end='1/08/2018') + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', + '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], + dtype='datetime64[ns]', freq='D') + + Specify `start` and `periods`, the number of periods (days). + + >>> pd.date_range(start='1/1/2018', periods=8) + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', + '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], + dtype='datetime64[ns]', freq='D') + + Specify `end` and `periods`, the number of periods (days). + + >>> pd.date_range(end='1/1/2018', periods=8) + DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28', + '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'], + dtype='datetime64[ns]', freq='D') + + **Other Parameters** + + Changed the `freq` (frequency) to ``'M'`` (month end frequency). 
+ + >>> pd.date_range(start='1/1/2018', periods=5, freq='M') + DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30', + '2018-05-31'], + dtype='datetime64[ns]', freq='M') + + Multiples are allowed + + >>> pd.date_range(start='1/1/2018', periods=5, freq='3M') + DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', + '2019-01-31'], + dtype='datetime64[ns]', freq='3M') + + `freq` can also be specified as an Offset object. + + >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3)) + DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', + '2019-01-31'], + dtype='datetime64[ns]', freq='3M') + + Specify `tz` to set the timezone. + + >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo') + DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00', + '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00', + '2018-01-05 00:00:00+09:00'], + dtype='datetime64[ns, Asia/Tokyo]', freq='D') + + `closed` controls whether to include `start` and `end` that are on the + boundary. The default includes boundary points on either end. + + >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed=None) + DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'], + dtype='datetime64[ns]', freq='D') + + Use ``closed='left'`` to exclude `end` if it falls on the boundary. + + >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='left') + DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'], + dtype='datetime64[ns]', freq='D') + + Use ``closed='right'`` to exclude `start` if it falls on the boundary. + + >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='right') + DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], + dtype='datetime64[ns]', freq='D') """ return DatetimeIndex(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name,
I have updated some little typos of full stop that I came across while validating and added some examples for `date_range` function in pandas/core/indexes/datetimes.py . Please review this and let me know if I can make it better. thanks
https://api.github.com/repos/pandas-dev/pandas/pulls/20128
2018-03-10T13:29:00Z
2018-03-16T21:06:57Z
2018-03-16T21:06:57Z
2018-03-16T21:06:57Z
DOC: update the pandas.DataFrame.to_sql docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..fcc551b3ac2e4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1865,33 +1865,108 @@ def to_sql(self, name, con, schema=None, if_exists='fail', index=True, """ Write records stored in a DataFrame to a SQL database. + Databases supported by SQLAlchemy [1]_ are supported. Tables can be + newly created, appended to, or overwritten. + Parameters ---------- name : string - Name of SQL table - con : SQLAlchemy engine or DBAPI2 connection (legacy mode) + Name of SQL table. + con : sqlalchemy.engine.Engine or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that - library. If a DBAPI2 object, only sqlite3 is supported. - schema : string, default None + library. Legacy support is provided for sqlite3.Connection objects. + schema : string, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' - - fail: If table exists, do nothing. - - replace: If table exists, drop it, recreate it, and insert data. - - append: If table exists, insert data. Create if does not exist. + How to behave if the table already exists. + + * fail: Raise a ValueError. + * replace: Drop the table before inserting new values. + * append: Insert new values to the existing table. + index : boolean, default True - Write DataFrame index as a column. + Write DataFrame index as a column. Uses `index_label` as the column + name in the table. index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. - chunksize : int, default None - If not None, then rows will be written in batches of this size at a - time. If None, all rows will be written at once. 
- dtype : dict of column name to SQL type, default None - Optional specifying the datatype for columns. The SQL type should - be a SQLAlchemy type, or a string for sqlite3 fallback connection. + chunksize : int, optional + Rows will be written in batches of this size at a time. By default, + all rows will be written at once. + dtype : dict, optional + Specifying the datatype for columns. The keys should be the column + names and the values should be the SQLAlchemy types or strings for + the sqlite3 legacy mode. + + Raises + ------ + ValueError + When the table already exists and `if_exists` is 'fail' (the + default). + + See Also + -------- + pandas.read_sql : read a DataFrame from a table + + References + ---------- + .. [1] http://docs.sqlalchemy.org + .. [2] https://www.python.org/dev/peps/pep-0249/ + + Examples + -------- + + Create an in-memory SQLite database. + + >>> from sqlalchemy import create_engine + >>> engine = create_engine('sqlite://', echo=False) + + Create a table from scratch with 3 rows. + + >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) + >>> df + name + 0 User 1 + 1 User 2 + 2 User 3 + + >>> df.to_sql('users', con=engine) + >>> engine.execute("SELECT * FROM users").fetchall() + [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] + + >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) + >>> df1.to_sql('users', con=engine, if_exists='append') + >>> engine.execute("SELECT * FROM users").fetchall() + [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), + (0, 'User 4'), (1, 'User 5')] + + Overwrite the table with just ``df1``. + + >>> df1.to_sql('users', con=engine, if_exists='replace', + ... index_label='id') + >>> engine.execute("SELECT * FROM users").fetchall() + [(0, 'User 4'), (1, 'User 5')] + + Specify the dtype (especially useful for integers with missing values). + Notice that while pandas is forced to store the data as floating point, + the database supports nullable integers. 
When fetching the data with + Python, we get back integer scalars. + + >>> df = pd.DataFrame({"A": [1, None, 2]}) + >>> df + A + 0 1.0 + 1 NaN + 2 2.0 + + >>> from sqlalchemy.types import Integer + >>> df.to_sql('integers', con=engine, index=False, + ... dtype={"A": Integer()}) + >>> engine.execute("SELECT * FROM integers").fetchall() + [(1,), (None,), (2,)] """ from pandas.io import sql sql.to_sql(self, name, con, schema=schema, if_exists=if_exists,
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ##################### Docstring (pandas.DataFrame.to_sql) ##################### ################################################################################ Write records stored in a DataFrame to a SQL database. This function inserts all rows of the dataframe into the given table and recreates it if if_exists='replace'. Databases supported by SQLAlchemy or DBAPI2 are also supported. Parameters ---------- name : string Name of SQL table. con : SQLAlchemy engine or DBAPI2 connection (legacy mode) Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. schema : string, default None Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' Accepted values: - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column. index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. 
chunksize : int, default None If not None, then rows will be written in batches of this size at a time. If None, all rows will be written at once. dtype : dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type, or a string for sqlite3 fallback connection. Returns -------- None See Also -------- pandas.read_sql_query : read a DataFrame from a table Examples -------- >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite:///example.db', echo=False) >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) >>> # create a table from scratch with 3 rows >>> df.to_sql('users', con=engine, if_exists='replace') >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) >>> # 2 new rows inserted >>> df1.to_sql('users', con=engine, if_exists='append') >>> # table will be recreated and 5 rows inserted >>> df = pd.concat([df, df1], ignore_index=True) >>> df.to_sql('users', con=engine, if_exists='replace') >>> pd.read_sql_query("select * from users",con=engine) index name 0 0 User 1 1 1 User 2 2 2 User 3 3 3 User 4 4 4 User 5 ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DataFrame.to_sql" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20126
2018-03-10T13:26:04Z
2018-03-13T20:27:31Z
2018-03-13T20:27:31Z
2018-03-13T20:27:36Z
DOC: Improved the docstring of pandas.Series.truncate
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..c056d160e91bf 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6888,29 +6888,37 @@ def tshift(self, periods=1, freq=None, axis=0): def truncate(self, before=None, after=None, axis=None, copy=True): """ - Truncates a sorted DataFrame/Series before and/or after some - particular index value. If the axis contains only datetime values, - before/after parameters are converted to datetime values. + Truncate a Series or DataFrame before and after some index value. + + This is a useful shorthand for boolean indexing based on index + values above or below certain thresholds. Parameters ---------- before : date, string, int - Truncate all rows before this index value + Truncate all rows before this index value. after : date, string, int - Truncate all rows after this index value - axis : {0 or 'index', 1 or 'columns'} - - * 0 or 'index': apply truncation to rows - * 1 or 'columns': apply truncation to columns - - Default is stat axis for given data type (0 for Series and - DataFrames, 1 for Panels) + Truncate all rows after this index value. + axis : {0 or 'index', 1 or 'columns'}, optional + Axis to truncate. Truncates the index (rows) by default. copy : boolean, default is True, - return a copy of the truncated section + Return a copy of the truncated section. Returns ------- - truncated : type of caller + type of caller + The truncated Series or DataFrame. + + See Also + -------- + DataFrame.loc : Select a subset of a DataFrame by label. + DataFrame.iloc : Select a subset of a DataFrame by position. + + Notes + ----- + If the index being truncated contains only datetime values, + `before` and `after` may be specified as strings instead of + Timestamps. Examples -------- @@ -6918,28 +6926,63 @@ def truncate(self, before=None, after=None, axis=None, copy=True): ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... 
index=[1, 2, 3, 4, 5]) + >>> df + A B C + 1 a f k + 2 b g l + 3 c h m + 4 d i n + 5 e j o + >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n - >>> df = pd.DataFrame({'A': [1, 2, 3, 4, 5], - ... 'B': [6, 7, 8, 9, 10], - ... 'C': [11, 12, 13, 14, 15]}, - ... index=['a', 'b', 'c', 'd', 'e']) - >>> df.truncate(before='b', after='d') - A B C - b 2 7 12 - c 3 8 13 - d 4 9 14 - The index values in ``truncate`` can be datetimes or string - dates. Note that ``truncate`` assumes a 0 value for any unspecified - date component in a ``DatetimeIndex`` in contrast to slicing which - returns any partially matching dates. + The columns of a DataFrame can be truncated. + >>> df.truncate(before="A", after="B", axis="columns") + A B + 1 a f + 2 b g + 3 c h + 4 d i + 5 e j + + For Series, only rows can be truncated. + + >>> df['A'].truncate(before=2, after=4) + 2 b + 3 c + 4 d + Name: A, dtype: object + + The index values in ``truncate`` can be datetimes or string + dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) + >>> df.tail() + A + 2016-01-31 23:59:56 1 + 2016-01-31 23:59:57 1 + 2016-01-31 23:59:58 1 + 2016-01-31 23:59:59 1 + 2016-02-01 00:00:00 1 + + >>> df.truncate(before=pd.Timestamp('2016-01-05'), + ... after=pd.Timestamp('2016-01-10')).tail() + A + 2016-01-09 23:59:56 1 + 2016-01-09 23:59:57 1 + 2016-01-09 23:59:58 1 + 2016-01-09 23:59:59 1 + 2016-01-10 00:00:00 1 + + Because the index is a DatetimeIndex containing only dates, we can + specify `before` and `after` as strings. They will be coerced to + Timestamps before truncation. + >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 @@ -6947,6 +6990,11 @@ def truncate(self, before=None, after=None, axis=None, copy=True): 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 + + Note that ``truncate`` assumes a 0 value for any unspecified time + component (midnight). 
This differs from partial string slicing, which + returns any partially matching dates. + >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ###################### Docstring (pandas.Series.truncate) ###################### ################################################################################ Truncate a DataFrame/Series before/after some index value. If the axis contains only datetime values, before/after parameters are converted to datetime values. Parameters ---------- before : date, string, int Truncate all rows before this index value. after : date, string, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'} Default is stat axis for given data type (0 for Series and DataFrames, 1 for Panels). copy : boolean, default is True, Return a copy of the truncated section. Returns ------- truncated : type of caller See Also -------- DataFrame.truncate : Truncate a DataFrame before/after some index value. Series.truncate : Truncate a Series before/after some index value. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n >>> df = pd.DataFrame({'A': [1, 2, 3, 4, 5], ... 'B': [6, 7, 8, 9, 10], ... 'C': [11, 12, 13, 14, 15]}, ... 
index=['a', 'b', 'c', 'd', 'e']) >>> df.truncate(before='b', after='d') A B C b 2 7 12 c 3 8 13 d 4 9 14 The index values in ``truncate`` can be datetimes or string dates. Note that ``truncate`` assumes a 0 value for any unspecified date component in a ``DatetimeIndex`` in contrast to slicing which returns any partially matching dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.truncate" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20125
2018-03-10T13:24:58Z
2018-03-15T21:57:25Z
2018-03-15T21:57:25Z
2018-03-15T21:57:40Z
DOC: Update the pandas.Index.isna docstring
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index fd1d3690e8a89..f387afde24117 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2022,18 +2022,58 @@ def hasnans(self): def isna(self): """ - Detect missing values + Detect missing values. + + Return a boolean same-sized object indicating if the values are NA. + NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get + mapped to ``True`` values. + Everything else get mapped to ``False`` values. Characters such as + empty strings `''` or :attr:`numpy.inf` are not considered NA values + (unless you set :attr:`pandas.options.mode.use_inf_as_na` `= True`). .. versionadded:: 0.20.0 Returns ------- - a boolean array of whether my values are NA + numpy.ndarray + A boolean array of whether my values are NA - See also + See Also + -------- + pandas.Index.notna : boolean inverse of isna. + pandas.Index.dropna : omit entries with missing values. + pandas.isna : top-level isna. + Series.isna : detect missing values in Series object. + + Examples -------- - isnull : alias of isna - pandas.isna : top-level isna + Show which entries in a pandas.Index are NA. The result is an + array. + + >>> idx = pd.Index([5.2, 6.0, np.NaN]) + >>> idx + Float64Index([5.2, 6.0, nan], dtype='float64') + >>> idx.isna() + array([False, False, True], dtype=bool) + + Empty strings are not considered NA values. None is considered an NA + value. + + >>> idx = pd.Index(['black', '', 'red', None]) + >>> idx + Index(['black', '', 'red', None], dtype='object') + >>> idx.isna() + array([False, False, False, True], dtype=bool) + + For datetimes, `NaT` (Not a Time) is considered as an NA value. + + >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'), + ... pd.Timestamp(''), None, pd.NaT]) + >>> idx + DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], + dtype='datetime64[ns]', freq=None) + >>> idx.isna() + array([False, True, True, True], dtype=bool) """ return self._isnan isnull = isna
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant ``` ################################################################################ ######################## Docstring (pandas.Index.isna) ######################## ################################################################################ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, get mapped to True values. Everything else get mapped to False values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set :attr:`pandas.options.mode.use_inf_as_na` `= True`). .. versionadded:: 0.20.0 Returns ------- numpy.ndarray A boolean array of whether my values are NA See also -------- pandas.Index.isnull : alias of isna pandas.Index.notna : boolean inverse of isna pandas.Index.dropna : omit entries with missing values pandas.isna : top-level isna Examples -------- Show which entries in a pandas.Index are NA. The result is a array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Float64Index([5.2, 6.0, nan], dtype='float64') >>> idx.isna() array([False, False, True]) Empty strings are not considered NA values. None is considered a NA value. 
>>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.isna() array([False, False, False, True]) ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Index.isna" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20123
2018-03-10T13:04:38Z
2018-03-12T21:17:18Z
2018-03-12T21:17:17Z
2018-03-14T10:54:12Z
DOC: improved the docstring of pandas.Series.clip_lower
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..59ec9d7340d5b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5644,7 +5644,8 @@ def clip_upper(self, threshold, axis=None, inplace=False): Align object with threshold along the given axis. inplace : boolean, default False Whether to perform the operation in place on the data - .. versionadded:: 0.21.0 + + .. versionadded:: 0.21.0 See Also -------- @@ -5659,24 +5660,104 @@ def clip_upper(self, threshold, axis=None, inplace=False): def clip_lower(self, threshold, axis=None, inplace=False): """ - Return copy of the input with values below given value(s) truncated. + Return copy of the input with values below a threshold truncated. Parameters ---------- - threshold : float or array_like - axis : int or string axis name, optional - Align object with threshold along the given axis. + threshold : numeric or array-like + Minimum value allowed. All values below threshold will be set to + this value. + + * float : every value is compared to `threshold`. + * array-like : The shape of `threshold` should match the object + it's compared to. When `self` is a Series, `threshold` should be + the length. When `self` is a DataFrame, `threshold` should 2-D + and the same shape as `self` for ``axis=None``, or 1-D and the + same length as the axis being compared. + + axis : {0 or 'index', 1 or 'columns'}, default 0 + Align `self` with `threshold` along the given axis. + inplace : boolean, default False - Whether to perform the operation in place on the data - .. versionadded:: 0.21.0 + Whether to perform the operation in place on the data. + + .. versionadded:: 0.21.0 See Also -------- - clip + Series.clip : Return copy of input with values below and above + thresholds truncated. + Series.clip_upper : Return copy of input with values above + threshold truncated. 
Returns ------- clipped : same type as input + + Examples + -------- + Series single threshold clipping: + + >>> s = pd.Series([5, 6, 7, 8, 9]) + >>> s.clip_lower(8) + 0 8 + 1 8 + 2 8 + 3 8 + 4 9 + dtype: int64 + + Series clipping element-wise using an array of thresholds. `threshold` + should be the same length as the Series. + + >>> elemwise_thresholds = [4, 8, 7, 2, 5] + >>> s.clip_lower(elemwise_thresholds) + 0 5 + 1 8 + 2 7 + 3 8 + 4 9 + dtype: int64 + + DataFrames can be compared to a scalar. + + >>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]}) + >>> df + A B + 0 1 2 + 1 3 4 + 2 5 6 + + >>> df.clip_lower(3) + A B + 0 3 3 + 1 3 4 + 2 5 6 + + Or to an array of values. By default, `threshold` should be the same + shape as the DataFrame. + + >>> df.clip_lower(np.array([[3, 4], [2, 2], [6, 2]])) + A B + 0 3 4 + 1 3 4 + 2 6 6 + + Control how `threshold` is broadcast with `axis`. In this case + `threshold` should be the same length as the axis specified by + `axis`. + + >>> df.clip_lower(np.array([3, 3, 5]), axis='index') + A B + 0 3 3 + 1 3 4 + 2 5 6 + + >>> df.clip_lower(np.array([4, 5]), axis='columns') + A B + 0 4 5 + 1 4 5 + 2 5 6 """ return self._clip_with_one_bound(threshold, method=self.ge, axis=axis, inplace=inplace)
``` (pandas_dev) david@david-TM1604:~/repos/pandas/scripts$ python validate_docstrings.py pandas.Series.clip_lower ################################################################################ ##################### Docstring (pandas.Series.clip_lower) ##################### ################################################################################ Return copy of the input with values below given value(s) truncated. It truncate values below a certain threshold. Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : float or array_like Minimum value allowed. All values below threshold will be set to this value. axis : int or string axis name, optional Align object with threshold along the given axis. inplace : boolean, default False Whether to perform the operation in place on the data. See Also -------- clip : Return copy of the input with values below and over given thresholds values truncated. Returns ------- clipped : same type as input Examples -------- >>> s = pd.Series([3.816052, 7.969235, 6.903116, 2.532261, 5.015296]) >>> s 0 3.816052 1 7.969235 2 6.903116 3 2.532261 4 5.015296 dtype: float64 >>> s.clip_lower(4) 0 4.000000 1 7.969235 2 6.903116 3 4.000000 4 5.015296 dtype: float64 >>> t = [4,8,7,2,5] >>> t [4, 8, 7, 2, 5] >>> s.clip_lower(t) 0 4.000000 1 8.000000 2 7.000000 3 2.532261 4 5.015296 dtype: float64 ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.Series.clip_lower" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20122
2018-03-10T12:56:14Z
2018-03-16T21:03:01Z
2018-03-16T21:03:01Z
2018-03-16T21:03:07Z
DOC: updated the pandas.DataFrame.plot.hexbin docstring
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index c5b22effc6486..0e28b1cbd1cad 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -3174,26 +3174,85 @@ def scatter(self, x, y, s=None, c=None, **kwds): def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwds): """ - Hexbin plot + Generate a hexagonal binning plot. + + Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None` + (the default), this is a histogram of the number of occurrences + of the observations at ``(x[i], y[i])``. + + If `C` is specified, specifies values at given coordinates + ``(x[i], y[i])``. These values are accumulated for each hexagonal + bin and then reduced according to `reduce_C_function`, + having as default the NumPy's mean function (:meth:`numpy.mean`). + (If `C` is specified, it must also be a 1-D sequence + of the same length as `x` and `y`, or a column label.) Parameters ---------- - x, y : label or position, optional - Coordinates for each point. - C : label or position, optional - The value at each `(x, y)` point. - reduce_C_function : callable, optional + x : int or str + The column label or position for x points. + y : int or str + The column label or position for y points. + C : int or str, optional + The column label or position for the value of `(x, y)` point. + reduce_C_function : callable, default `np.mean` Function of one argument that reduces all the values in a bin to - a single number (e.g. `mean`, `max`, `sum`, `std`). - gridsize : int, optional - Number of bins. - `**kwds` : optional + a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`). + gridsize : int or tuple of (int, int), default 100 + The number of hexagons in the x-direction. + The corresponding number of hexagons in the y-direction is + chosen in a way that the hexagons are approximately regular. 
+ Alternatively, gridsize can be a tuple with two elements + specifying the number of hexagons in the x-direction and the + y-direction. + **kwds Additional keyword arguments are documented in :meth:`pandas.DataFrame.plot`. Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + matplotlib.AxesSubplot + The matplotlib ``Axes`` on which the hexbin is plotted. + + See Also + -------- + DataFrame.plot : Make plots of a DataFrame. + matplotlib.pyplot.hexbin : hexagonal binning plot using matplotlib, + the matplotlib function that is used under the hood. + + Examples + -------- + The following examples are generated with random data from + a normal distribution. + + .. plot:: + :context: close-figs + + >>> n = 10000 + >>> df = pd.DataFrame({'x': np.random.randn(n), + ... 'y': np.random.randn(n)}) + >>> ax = df.plot.hexbin(x='x', y='y', gridsize=20) + + The next example uses `C` and `np.sum` as `reduce_C_function`. + Note that `'observations'` values ranges from 1 to 5 but the result + plot shows values up to more than 25. This is because of the + `reduce_C_function`. + + .. plot:: + :context: close-figs + + >>> n = 500 + >>> df = pd.DataFrame({ + ... 'coord_x': np.random.uniform(-3, 3, size=n), + ... 'coord_y': np.random.uniform(30, 50, size=n), + ... 'observations': np.random.randint(1,5, size=n) + ... }) + >>> ax = df.plot.hexbin(x='coord_x', + ... y='coord_y', + ... C='observations', + ... reduce_C_function=np.sum, + ... gridsize=10, + ... cmap="viridis") """ if reduce_C_function is not None: kwds['reduce_C_function'] = reduce_C_function
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################### Docstring (pandas.DataFrame.plot.hexbin) ################### ################################################################################ Make hexagonal binning plots. Make an hexagonal binning plot of *x* versus *y*, where *x*, *y* are 1-D sequences of the same length, *N*. If *C* is *None* (the default), this is an histogram of the number of occurrences of the observations at (x[i],y[i]). If *C* is specified, specifies values at given coordinates (x[i],y[i]). These values are accumulated for each hexagonal bin and then reduced according to *reduce_C_function*, having as default the numpy's mean function (np.mean). (If *C* is specified, it must also be a 1-D sequence of the same length as *x* and *y*.) Parameters ---------- x : label or position, optional Coordinates for x point. y : label or position, optional Coordinates for y point. C : label or position, optional The value at each `(x, y)` point. reduce_C_function : callable, optional, default `mean` Function of one argument that reduces all the values in a bin to a single number (e.g. `mean`, `max`, `sum`, `std`). gridsize : int, optional, default 100 The number of hexagons in the x-direction. The corresponding number of hexagons in the y-direction is chosen in a way that the hexagons are approximately regular. 
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. kwds : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns ------- axes : matplotlib.AxesSubplot or np.array of them. See Also -------- matplotlib.pyplot.hexbin : hexagonal binning plot using matplotlib. Examples -------- .. plot:: :context: close-figs >>> from sklearn.datasets import load_iris >>> iris = load_iris() >>> df = pd.DataFrame(iris.data, columns=iris.feature_names) >>> hexbin = df.plot.hexbin(x='sepal length (cm)', y='sepal width (cm)', ... gridsize=10, cmap='viridis') ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DataFrame.plot.hexbin" correct. :) ``` thanks to @Renton2017
https://api.github.com/repos/pandas-dev/pandas/pulls/20121
2018-03-10T12:50:44Z
2018-03-14T12:40:48Z
2018-03-14T12:40:48Z
2018-03-14T14:17:01Z
DOC: update and seperate the Series.drop and Dataframe.drop docstring
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a66d00fff9714..8e25432ecef26 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3035,6 +3035,129 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, method=method, level=level, copy=copy, limit=limit, fill_value=fill_value) + def drop(self, labels=None, axis=0, index=None, columns=None, + level=None, inplace=False, errors='raise'): + """ + Drop specified labels from rows or columns. + + Remove rows or columns by specifying label names and corresponding + axis, or by specifying directly index or column names. When using a + multi-index, labels on different levels can be removed by specifying + the level. + + Parameters + ---------- + labels : single label or list-like + Index or column labels to drop. + axis : {0 or 'index', 1 or 'columns'}, default 0 + Whether to drop labels from the index (0 or 'index') or + columns (1 or 'columns'). + index, columns : single label or list-like + Alternative to specifying axis (``labels, axis=1`` + is equivalent to ``columns=labels``). + + .. versionadded:: 0.21.0 + level : int or level name, optional + For MultiIndex, level from which the labels will be removed. + inplace : bool, default False + If True, do operation inplace and return None. + errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and only existing labels are + dropped. + + Returns + ------- + dropped : pandas.DataFrame + + See Also + -------- + DataFrame.loc : Label-location based indexer for selection by label. + DataFrame.dropna : Return DataFrame with labels on given axis omitted + where (all or any) data are missing + DataFrame.drop_duplicates : Return DataFrame with duplicate rows + removed, optionally only considering certain columns + Series.drop : Return Series with specified index labels removed. 
+ + Raises + ------ + KeyError + If none of the labels are found in the selected axis + + Examples + -------- + >>> df = pd.DataFrame(np.arange(12).reshape(3,4), + ... columns=['A', 'B', 'C', 'D']) + >>> df + A B C D + 0 0 1 2 3 + 1 4 5 6 7 + 2 8 9 10 11 + + Drop columns + + >>> df.drop(['B', 'C'], axis=1) + A D + 0 0 3 + 1 4 7 + 2 8 11 + + >>> df.drop(columns=['B', 'C']) + A D + 0 0 3 + 1 4 7 + 2 8 11 + + Drop a row by index + + >>> df.drop([0, 1]) + A B C D + 2 8 9 10 11 + + Drop columns and/or rows of MultiIndex DataFrame + + >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], + ... ['speed', 'weight', 'length']], + ... labels=[[0, 0, 0, 1, 1, 1, 2, 2, 2], + ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) + >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], + ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], + ... [250, 150], [1.5, 0.8], [320, 250], + ... [1, 0.8], [0.3,0.2]]) + >>> df + big small + lama speed 45.0 30.0 + weight 200.0 100.0 + length 1.5 1.0 + cow speed 30.0 20.0 + weight 250.0 150.0 + length 1.5 0.8 + falcon speed 320.0 250.0 + weight 1.0 0.8 + length 0.3 0.2 + + >>> df.drop(index='cow', columns='small') + big + lama speed 45.0 + weight 200.0 + length 1.5 + falcon speed 320.0 + weight 1.0 + length 0.3 + + >>> df.drop(index='length', level=1) + big small + lama speed 45.0 30.0 + weight 200.0 100.0 + cow speed 30.0 20.0 + weight 250.0 150.0 + falcon speed 320.0 250.0 + weight 1.0 0.8 + """ + return super(DataFrame, self).drop(labels=labels, axis=axis, + index=index, columns=columns, + level=level, inplace=inplace, + errors=errors) + @rewrite_axis_style_signature('mapper', [('copy', True), ('inplace', False), ('level', None)]) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..6385a27e60cca 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2799,73 +2799,7 @@ def reindex_like(self, other, method=None, copy=True, limit=None, def drop(self, labels=None, axis=0, index=None, columns=None, 
level=None, inplace=False, errors='raise'): - """ - Return new object with labels in requested axis removed. - Parameters - ---------- - labels : single label or list-like - Index or column labels to drop. - axis : int or axis name - Whether to drop labels from the index (0 / 'index') or - columns (1 / 'columns'). - index, columns : single label or list-like - Alternative to specifying `axis` (``labels, axis=1`` is - equivalent to ``columns=labels``). - - .. versionadded:: 0.21.0 - level : int or level name, default None - For MultiIndex - inplace : bool, default False - If True, do operation inplace and return None. - errors : {'ignore', 'raise'}, default 'raise' - If 'ignore', suppress error and existing labels are dropped. - - Returns - ------- - dropped : type of caller - - Raises - ------ - KeyError - If none of the labels are found in the selected axis - - Examples - -------- - >>> df = pd.DataFrame(np.arange(12).reshape(3,4), - columns=['A', 'B', 'C', 'D']) - >>> df - A B C D - 0 0 1 2 3 - 1 4 5 6 7 - 2 8 9 10 11 - - Drop columns - - >>> df.drop(['B', 'C'], axis=1) - A D - 0 0 3 - 1 4 7 - 2 8 11 - - >>> df.drop(columns=['B', 'C']) - A D - 0 0 3 - 1 4 7 - 2 8 11 - - Drop a row by index - - >>> df.drop([0, 1]) - A B C D - 2 8 9 10 11 - - Notes - ----- - Specifying both `labels` and `index` or `columns` will raise a - ValueError. - - """ inplace = validate_bool_kwarg(inplace, 'inplace') if labels is not None: diff --git a/pandas/core/series.py b/pandas/core/series.py index 069f0372ab6e1..4a0fe63a829c6 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2660,6 +2660,97 @@ def rename(self, index=None, **kwargs): def reindex(self, index=None, **kwargs): return super(Series, self).reindex(index=index, **kwargs) + def drop(self, labels=None, axis=0, index=None, columns=None, + level=None, inplace=False, errors='raise'): + """ + Return Series with specified index labels removed. + + Remove elements of a Series based on specifying the index labels. 
+ When using a multi-index, labels on different levels can be removed + by specifying the level. + + Parameters + ---------- + labels : single label or list-like + Index labels to drop. + axis : 0, default 0 + Redundant for application on Series. + index, columns : None + Redundant for application on Series, but index can be used instead + of labels. + + .. versionadded:: 0.21.0 + level : int or level name, optional + For MultiIndex, level for which the labels will be removed. + inplace : bool, default False + If True, do operation inplace and return None. + errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and only existing labels are dropped. + + Returns + ------- + dropped : pandas.Series + + See Also + -------- + Series.reindex : Return only specified index labels of Series. + Series.dropna : Return series without null values. + Series.drop_duplicates : Return Series with duplicate values removed. + DataFrame.drop : Drop specified labels from rows or columns. + + Raises + ------ + KeyError + If none of the labels are found in the index. + + Examples + -------- + >>> s = pd.Series(data=np.arange(3), index=['A','B','C']) + >>> s + A 0 + B 1 + C 2 + dtype: int64 + + Drop labels B en C + + >>> s.drop(labels=['B','C']) + A 0 + dtype: int64 + + Drop 2nd level label in MultiIndex Series + + >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], + ... ['speed', 'weight', 'length']], + ... labels=[[0, 0, 0, 1, 1, 1, 2, 2, 2], + ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) + >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], + ... 
index=midx) + >>> s + lama speed 45.0 + weight 200.0 + length 1.2 + cow speed 30.0 + weight 250.0 + length 1.5 + falcon speed 320.0 + weight 1.0 + length 0.3 + dtype: float64 + + >>> s.drop(labels='weight', level=1) + lama speed 45.0 + length 1.2 + cow speed 30.0 + length 1.5 + falcon speed 320.0 + length 0.3 + dtype: float64 + """ + return super(Series, self).drop(labels=labels, axis=axis, index=index, + columns=columns, level=level, + inplace=inplace, errors=errors) + @Appender(generic._shared_docs['fillna'] % _shared_doc_kwargs) def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None, **kwargs):
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` Errors found: Errors in parameters section Parameter "columns" description should finish with "." ``` This error is caused by `.. versionadded:: 0.21.0`, which needs to stay in place.
https://api.github.com/repos/pandas-dev/pandas/pulls/20120
2018-03-10T12:48:40Z
2018-03-13T16:05:54Z
2018-03-13T16:05:54Z
2018-03-13T17:49:55Z
DOC: improved the scatter method
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index c5b22effc6486..ab4b223654d31 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -3151,23 +3151,82 @@ def pie(self, y=None, **kwds): def scatter(self, x, y, s=None, c=None, **kwds): """ - Scatter plot + Create a scatter plot with varying marker point size and color. + + The coordinates of each point are defined by two dataframe columns and + filled circles are used to represent each point. This kind of plot is + useful to see complex correlations between two variables. Points could + be for instance natural 2D coordinates like longitude and latitude in + a map or, in general, any pair of metrics that can be plotted against + each other. Parameters ---------- - x, y : label or position, optional - Coordinates for each point. + x : int or str + The column name or column position to be used as horizontal + coordinates for each point. + y : int or str + The column name or column position to be used as vertical + coordinates for each point. s : scalar or array_like, optional - Size of each point. - c : label or position, optional - Color of each point. - `**kwds` : optional - Additional keyword arguments are documented in - :meth:`pandas.DataFrame.plot`. + The size of each point. Possible values are: + + - A single scalar so all points have the same size. + + - A sequence of scalars, which will be used for each point's size + recursively. For instance, when passing [2,14] all points size + will be either 2 or 14, alternatively. + + c : str, int or array_like, optional + The color of each point. Possible values are: + + - A single color string referred to by name, RGB or RGBA code, + for instance 'red' or '#a98d19'. + + - A sequence of color strings referred to by name, RGB or RGBA + code, which will be used for each point's color recursively. For + intance ['green','yellow'] all points will be filled in green or + yellow, alternatively. 
+ + - A column name or position whose values will be used to color the + marker points according to a colormap. + + **kwds + Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`. Returns ------- axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + + See Also + -------- + matplotlib.pyplot.scatter : scatter plot using multiple input data + formats. + + Examples + -------- + Let's see how to draw a scatter plot using coordinates from the values + in a DataFrame's columns. + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1], + ... [6.4, 3.2, 1], [5.9, 3.0, 2]], + ... columns=['length', 'width', 'species']) + >>> ax1 = df.plot.scatter(x='length', + ... y='width', + ... c='DarkBlue') + + And now with the color determined by a column as well. + + .. plot:: + :context: close-figs + + >>> ax2 = df.plot.scatter(x='length', + ... y='width', + ... c='species', + ... colormap='viridis') """ return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################## Docstring (pandas.DataFrame.plot.scatter) ################## ################################################################################ A scatter plot with point size *s* and color *c*. The coordinates of each point *x,y* are defined by two dataframe columns and filled circles are used to represent each point. Parameters ---------- x : column name or column position Horizontal and vertical coordinates of each point. y : column name or column position Vertical coordinates of each point. s : scalar or array_like, optional Size of each point. c : label, column name or column position, optional Color of each point. kwds : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. Returns ------- axes : matplotlib.AxesSubplot or np.array of them See Also -------- matplotlib.pyplot.scatter : scatter plot using multiple input data formats. Examples -------- .. plot:: :context: close-figs >>> from sklearn.datasets import load_iris >>> iris = load_iris() >>> df = pd.DataFrame(iris.data[:,:2], ... columns=iris.feature_names[:2]) >>> df['species'] = load_iris().target >>> f = df.plot.scatter(x='sepal length (cm)', ... y='sepal width (cm)', ... c='species', ... 
colormap='viridis') ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DataFrame.plot.scatter" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20118
2018-03-10T12:47:44Z
2018-03-14T12:57:47Z
2018-03-14T12:57:47Z
2018-03-14T12:57:47Z
DOC: update the pandas.Index.duplicated and pandas.Series.duplicated docstring
diff --git a/pandas/core/base.py b/pandas/core/base.py index fd039480fc6f1..257b26b64e642 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1197,24 +1197,6 @@ def drop_duplicates(self, keep='first', inplace=False): else: return result - _shared_docs['duplicated'] = ( - """Return boolean %(duplicated)s denoting duplicate values - - Parameters - ---------- - keep : {'first', 'last', False}, default 'first' - - ``first`` : Mark duplicates as ``True`` except for the first - occurrence. - - ``last`` : Mark duplicates as ``True`` except for the last - occurrence. - - False : Mark all duplicates as ``True``. - - Returns - ------- - duplicated : %(duplicated)s - """) - - @Appender(_shared_docs['duplicated'] % _indexops_doc_kwargs) def duplicated(self, keep='first'): from pandas.core.algorithms import duplicated if isinstance(self, ABCIndexClass): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f69777af31c9c..de4ea5fcfaefa 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4325,8 +4325,60 @@ def drop_duplicates(self, keep='first'): """ return super(Index, self).drop_duplicates(keep=keep) - @Appender(base._shared_docs['duplicated'] % _index_doc_kwargs) def duplicated(self, keep='first'): + """ + Indicate duplicate index values. + + Duplicated values are indicated as ``True`` values in the resulting + array. Either all duplicates, all except the first, or all except the + last occurrence of duplicates can be indicated. + + Parameters + ---------- + keep : {'first', 'last', False}, default 'first' + The value or values in a set of duplicates to mark as missing. + + - 'first' : Mark duplicates as ``True`` except for the first + occurrence. + - 'last' : Mark duplicates as ``True`` except for the last + occurrence. + - ``False`` : Mark all duplicates as ``True``. 
+ + Examples + -------- + By default, for each set of duplicated values, the first occurrence is + set to False and all others to True: + + >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama']) + >>> idx.duplicated() + array([False, False, True, False, True]) + + which is equivalent to + + >>> idx.duplicated(keep='first') + array([False, False, True, False, True]) + + By using 'last', the last occurrence of each set of duplicated values + is set on False and all others on True: + + >>> idx.duplicated(keep='last') + array([ True, False, True, False, False]) + + By setting keep on ``False``, all duplicates are True: + + >>> idx.duplicated(keep=False) + array([ True, False, True, False, True]) + + Returns + ------- + numpy.ndarray + + See Also + -------- + pandas.Series.duplicated : Equivalent method on pandas.Series + pandas.DataFrame.duplicated : Equivalent method on pandas.DataFrame + pandas.Index.drop_duplicates : Remove duplicate values from Index + """ return super(Index, self).duplicated(keep=keep) _index_shared_docs['fillna'] = """ diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 71d39ad812d20..332c6613a230c 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -399,7 +399,7 @@ def unique(self, level=None): return self._shallow_copy(result, categories=result.categories, ordered=result.ordered) - @Appender(base._shared_docs['duplicated'] % _index_doc_kwargs) + @Appender(Index.duplicated.__doc__) def duplicated(self, keep='first'): from pandas._libs.hashtable import duplicated_int64 codes = self.codes.astype('i8') diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index be64f6f4bfd0f..56f1f3c0bdd67 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -916,7 +916,7 @@ def f(k, stringify): for k, stringify in zip(key, self._have_mixed_levels)]) return hash_tuple(key) - @Appender(base._shared_docs['duplicated'] % _index_doc_kwargs) + 
@Appender(Index.duplicated.__doc__) def duplicated(self, keep='first'): from pandas.core.sorting import get_group_index from pandas._libs.hashtable import duplicated_int64 diff --git a/pandas/core/series.py b/pandas/core/series.py index d9ee1c856600f..07cfc671cbd28 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1458,8 +1458,78 @@ def drop_duplicates(self, keep='first', inplace=False): """ return super(Series, self).drop_duplicates(keep=keep, inplace=inplace) - @Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs) def duplicated(self, keep='first'): + """ + Indicate duplicate Series values. + + Duplicated values are indicated as ``True`` values in the resulting + Series. Either all duplicates, all except the first or all except the + last occurrence of duplicates can be indicated. + + Parameters + ---------- + keep : {'first', 'last', False}, default 'first' + - 'first' : Mark duplicates as ``True`` except for the first + occurrence. + - 'last' : Mark duplicates as ``True`` except for the last + occurrence. + - ``False`` : Mark all duplicates as ``True``. 
+ + Examples + -------- + By default, for each set of duplicated values, the first occurrence is + set on False and all others on True: + + >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) + >>> animals.duplicated() + 0 False + 1 False + 2 True + 3 False + 4 True + dtype: bool + + which is equivalent to + + >>> animals.duplicated(keep='first') + 0 False + 1 False + 2 True + 3 False + 4 True + dtype: bool + + By using 'last', the last occurrence of each set of duplicated values + is set on False and all others on True: + + >>> animals.duplicated(keep='last') + 0 True + 1 False + 2 True + 3 False + 4 False + dtype: bool + + By setting keep on ``False``, all duplicates are True: + + >>> animals.duplicated(keep=False) + 0 True + 1 False + 2 True + 3 False + 4 True + dtype: bool + + Returns + ------- + pandas.core.series.Series + + See Also + -------- + pandas.Index.duplicated : Equivalent method on pandas.Index + pandas.DataFrame.duplicated : Equivalent method on pandas.DataFrame + pandas.Series.drop_duplicates : Remove duplicate values from Series + """ return super(Series, self).duplicated(keep=keep) def idxmin(self, axis=None, skipna=True, *args, **kwargs):
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: Method `pandas.Index.duplicated`: ``` ################################################################################ ##################### Docstring (pandas.Index.duplicated) ##################### ################################################################################ Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. 
Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> idx.duplicated() array([False, False, True, False, True], dtype=bool) which is equivalent to >>> idx.duplicated(keep='first') array([False, False, True, False, True], dtype=bool) By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> idx.duplicated(keep='last') array([ True, False, True, False, False], dtype=bool) By setting keep on ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True], dtype=bool) Returns ------- numpy.ndarray See Also -------- pandas.Series.duplicated : equivalent method on pandas.Series ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Errors in parameters section Parameter "keep" description should start with capital letter ``` Method `pandas.Series.duplicated`: ``` ################################################################################ ##################### Docstring (pandas.Series.duplicated) ##################### ################################################################################ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. 
Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool Returnsinde ------- pandas.core.series.Series See Also -------- pandas.Index.duplicated : equivalent method on pandas.Index ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Errors in parameters section Parameter "keep" description should start with capital letter ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. - We (Ghent chapter) decided that an additional line of text (with capital) was less useful than starting with explaining the list of options. Instead of using the template-based version used before, we split out both docstrings and made a separate for the `Index` versus the `Series`. This introduces some redundancy and overlap (basically, the `keep` argument, also shared with `drop_duplicated`), but provides a cleaner option by having the examples written inside the docstring of the methods and not *somewhere* else in the code.
https://api.github.com/repos/pandas-dev/pandas/pulls/20117
2018-03-10T12:42:20Z
2018-03-14T15:01:26Z
2018-03-14T15:01:26Z
2020-01-13T11:22:21Z
DOC: update the pandas.Index.drop_duplicates and pandas.Series.drop_duplicates docstring
diff --git a/pandas/core/base.py b/pandas/core/base.py index 280b8849792e3..fd039480fc6f1 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1184,24 +1184,6 @@ def searchsorted(self, value, side='left', sorter=None): # needs coercion on the key (DatetimeIndex does already) return self.values.searchsorted(value, side=side, sorter=sorter) - _shared_docs['drop_duplicates'] = ( - """Return %(klass)s with duplicate values removed - - Parameters - ---------- - - keep : {'first', 'last', False}, default 'first' - - ``first`` : Drop duplicates except for the first occurrence. - - ``last`` : Drop duplicates except for the last occurrence. - - False : Drop all duplicates. - %(inplace)s - - Returns - ------- - deduplicated : %(klass)s - """) - - @Appender(_shared_docs['drop_duplicates'] % _indexops_doc_kwargs) def drop_duplicates(self, keep='first', inplace=False): inplace = validate_bool_kwarg(inplace, 'inplace') if isinstance(self, ABCIndexClass): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 7e6ae88a26e7c..ae81d0235d2dc 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4017,8 +4017,52 @@ def unique(self, level=None): result = super(Index, self).unique() return self._shallow_copy(result) - @Appender(base._shared_docs['drop_duplicates'] % _index_doc_kwargs) def drop_duplicates(self, keep='first'): + """ + Return Index with duplicate values removed. + + Parameters + ---------- + keep : {'first', 'last', ``False``}, default 'first' + - 'first' : Drop duplicates except for the first occurrence. + - 'last' : Drop duplicates except for the last occurrence. + - ``False`` : Drop all duplicates. + + Returns + ------- + deduplicated : Index + + See Also + -------- + Series.drop_duplicates : equivalent method on Series + DataFrame.drop_duplicates : equivalent method on DataFrame + Index.duplicated : related method on Index, indicating duplicate + Index values. 
+ + Examples + -------- + Generate an pandas.Index with duplicate values. + + >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo']) + + The `keep` parameter controls which duplicate values are removed. + The value 'first' keeps the first occurrence for each + set of duplicated entries. The default value of keep is 'first'. + + >>> idx.drop_duplicates(keep='first') + Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object') + + The value 'last' keeps the last occurrence for each set of duplicated + entries. + + >>> idx.drop_duplicates(keep='last') + Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object') + + The value ``False`` discards all sets of duplicated entries. + + >>> idx.drop_duplicates(keep=False) + Index(['cow', 'beetle', 'hippo'], dtype='object') + """ return super(Index, self).drop_duplicates(keep=keep) @Appender(base._shared_docs['duplicated'] % _index_doc_kwargs) diff --git a/pandas/core/series.py b/pandas/core/series.py index 069f0372ab6e1..090f599c860ae 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1316,8 +1316,77 @@ def unique(self): return result - @Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs) def drop_duplicates(self, keep='first', inplace=False): + """ + Return Series with duplicate values removed. + + Parameters + ---------- + keep : {'first', 'last', ``False``}, default 'first' + - 'first' : Drop duplicates except for the first occurrence. + - 'last' : Drop duplicates except for the last occurrence. + - ``False`` : Drop all duplicates. + inplace : boolean, default ``False`` + If ``True``, performs operation inplace and returns None. + + Returns + ------- + deduplicated : Series + + See Also + -------- + Index.drop_duplicates : equivalent method on Index + DataFrame.drop_duplicates : equivalent method on DataFrame + Series.duplicated : related method on Series, indicating duplicate + Series values. + + Examples + -------- + Generate an Series with duplicated entries. 
+ + >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], + ... name='animal') + >>> s + 0 lama + 1 cow + 2 lama + 3 beetle + 4 lama + 5 hippo + Name: animal, dtype: object + + With the 'keep' parameter, the selection behaviour of duplicated values + can be changed. The value 'first' keeps the first occurrence for each + set of duplicated entries. The default value of keep is 'first'. + + >>> s.drop_duplicates() + 0 lama + 1 cow + 3 beetle + 5 hippo + Name: animal, dtype: object + + The value 'last' for parameter 'keep' keeps the last occurrence for + each set of duplicated entries. + + >>> s.drop_duplicates(keep='last') + 1 cow + 3 beetle + 4 lama + 5 hippo + Name: animal, dtype: object + + The value ``False`` for parameter 'keep' discards all sets of + duplicated entries. Setting the value of 'inplace' to ``True`` performs + the operation inplace and returns ``None``. + + >>> s.drop_duplicates(keep=False, inplace=True) + >>> s + 1 cow + 3 beetle + 5 hippo + Name: animal, dtype: object + """ return super(Series, self).drop_duplicates(keep=keep, inplace=inplace) @Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [ ] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: Method ```pandas.Index.drop_duplicates```: ``` ################################################################################ ################### Docstring (pandas.Index.drop_duplicates) ################### ################################################################################ Return Index with duplicate values removed. The drop_duplicates method can remove occurences or whole sets of duplicated entries in a pandas.Index object. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. Returns ------- deduplicated : Index See Also -------- pandas.Series.drop_duplicates : equivalent method on pandas.Series Examples -------- Generate an pandas.Index with duplicate values. >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo']) With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> idx.drop_duplicates(keep='first') Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object') The value 'last' keeps the last occurrence for each set of duplicated entries. 
>>> idx.drop_duplicates(keep='last') Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object') The value ``False`` discards all sets of duplicated entries. >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object') ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Errors in parameters section Parameter "keep" description should start with capital letter ``` Method ```pandas.Series.drop_duplicates```: ``` ################################################################################ ################## Docstring (pandas.Series.drop_duplicates) ################## ################################################################################ Return Series with duplicate values removed. The drop_duplicates method can remove occurences or whole sets of duplicated entries in a pandas.Series object. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : boolean, default ``False`` If ``True``, performs operation inplace and returns None. Returns ------- deduplicated : Series See Also -------- pandas.Index.drop_duplicates : equivalent method on pandas.Index Examples -------- Generate an Series with duplicated entries. >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. 
>>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. Setting the value of 'inplace' to ``True`` performs the operation inplace and returns ``None``. >>> s.drop_duplicates(keep=False, inplace=True) >>> s 1 cow 3 beetle 5 hippo Name: animal, dtype: object ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Errors in parameters section Parameter "keep" description should start with capital letter ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. The parameter keyword fails in the validation script. In the DOC sprint team, it was decided that summing up the parameter values is more valuable.
https://api.github.com/repos/pandas-dev/pandas/pulls/20114
2018-03-10T12:22:45Z
2018-03-10T15:41:59Z
2018-03-10T15:41:59Z
2018-03-10T15:42:54Z
DOC: update the pandas.DataFrame.hist docstring
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 98fdcf8f94ae0..4fad350ed2761 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2128,50 +2128,81 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False, sharey=False, figsize=None, layout=None, bins=10, **kwds): """ - Draw histogram of the DataFrame's series using matplotlib / pylab. + Make a histogram of the DataFrame's. + + A `histogram`_ is a representation of the distribution of data. + This function calls :meth:`matplotlib.pyplot.hist`, on each series in + the DataFrame, resulting in one histogram per column. + + .. _histogram: https://en.wikipedia.org/wiki/Histogram Parameters ---------- data : DataFrame + The pandas object holding the data. column : string or sequence - If passed, will be used to limit data to a subset of columns + If passed, will be used to limit data to a subset of columns. by : object, optional - If passed, then used to form histograms for separate groups + If passed, then used to form histograms for separate groups. grid : boolean, default True - Whether to show axis grid lines + Whether to show axis grid lines. xlabelsize : int, default None - If specified changes the x-axis label size + If specified changes the x-axis label size. xrot : float, default None - rotation of x axis labels + Rotation of x axis labels. For example, a value of 90 displays the + x labels rotated 90 degrees clockwise. ylabelsize : int, default None - If specified changes the y-axis label size + If specified changes the y-axis label size. yrot : float, default None - rotation of y axis labels - ax : matplotlib axes object, default None + Rotation of y axis labels. For example, a value of 90 displays the + y labels rotated 90 degrees clockwise. + ax : Matplotlib axes object, default None + The axes to plot the histogram on. 
sharex : boolean, default True if ax is None else False In case subplots=True, share x axis and set some x axis labels to invisible; defaults to True if ax is None otherwise False if an ax - is passed in; Be aware, that passing in both an ax and sharex=True - will alter all x axis labels for all subplots in a figure! + is passed in. + Note that passing in both an ax and sharex=True will alter all x axis + labels for all subplots in a figure. sharey : boolean, default False In case subplots=True, share y axis and set some y axis labels to - invisible + invisible. figsize : tuple - The size of the figure to create in inches by default + The size in inches of the figure to create. Uses the value in + `matplotlib.rcParams` by default. layout : tuple, optional - Tuple of (rows, columns) for the layout of the histograms + Tuple of (rows, columns) for the layout of the histograms. bins : integer or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. - `**kwds` : other plotting keyword arguments - To be passed to hist function + **kwds + All other plotting keyword arguments to be passed to + :meth:`matplotlib.pyplot.hist`. + + Returns + ------- + axes : matplotlib.AxesSubplot or numpy.ndarray of them See Also -------- - matplotlib.axes.Axes.hist : Plot a histogram using matplotlib. + matplotlib.pyplot.hist : Plot a histogram using matplotlib. + + Examples + -------- + + .. plot:: + :context: close-figs + + This example draws a histogram based on the length and width of + some animals, displayed in three bins + >>> df = pd.DataFrame({ + ... 'length': [1.5, 0.5, 1.2, 0.9, 3], + ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1] + ... }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse']) + >>> hist = df.hist(bins=3) """ _converter._WARN = False if by is not None:
- [x] PR title is "DOC: update the pandas.DataFrame.hist docstring" - [x] The validation script passes: `scripts/validate_docstrings.py pandas.DataFrame.hist` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single pandas.DataFrame.hist` - [x] It has been proofread on language by another sprint participant ``` ################################################################################ ###################### Docstring (pandas.DataFrame.hist) ###################### ################################################################################ Draw histogram of the DataFrame's series using matplotlib / pylab. A histogram is a representation of the distribution of numerical data. This function wraps the matplotlib histogram function for each serie in the DataFrame. It returns an array with a plot for each histogram. Parameters ---------- data : DataFrame The pandas object holding the data. column : string or sequence If passed, will be used to limit data to a subset of columns. by : object, optional If passed, then used to form histograms for separate groups. grid : boolean, default True Whether to show axis grid lines. xlabelsize : int, default None If specified changes the x-axis label size. xrot : float, default None Rotation of x axis labels. ylabelsize : int, default None If specified changes the y-axis label size. yrot : float, default None Rotation of y axis labels. ax : Matplotlib axes object, default None The histogram axes. sharex : boolean, default True if ax is None else False In case subplots=True, share x axis and set some x axis labels to invisible; defaults to True if ax is None otherwise False if an ax is passed in; Be aware, that passing in both an ax and sharex=True will alter all x axis labels for all subplots in a figure!. sharey : boolean, default False In case subplots=True, share y axis and set some y axis labels to invisible. 
figsize : tuple The size of the figure to create in inches by default. layout : tuple, optional Tuple of (rows, columns) for the layout of the histograms. bins : integer or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. kwds : Keyword Arguments All other plotting keyword arguments to be passed to matplotlib's boxplot function. Returns ------- axes : matplotlib.AxesSubplot or np.array of them See Also -------- matplotlib.axes.Axes.hist : Plot a histogram using matplotlib. Examples -------- .. plot:: :context: close-figs >>> df = pd.DataFrame({ ... 'length': [ 1.5, 0.5, 1.2, 0.9, 3], ... 'width': [ 0.7, 0.2, 0.15, 0.2, 1.1] ... }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse']) >>> hist = df.hist(bins=3) ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DataFrame.hist" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20113
2018-03-10T11:57:44Z
2018-03-13T20:41:07Z
2018-03-13T20:41:07Z
2018-03-13T20:41:08Z
DOC: update validate_docstrings.py Validation script checks for tabs
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index 8425882f07be1..a7e121f9069fc 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -455,6 +455,12 @@ def validate_one(func_name): if not rel_desc: errs.append('Missing description for ' 'See Also "{}" reference'.format(rel_name)) + + for line in doc.raw_doc.splitlines(): + if re.match("^ *\t", line): + errs.append('Tabs found at the start of line "{}", ' + 'please use whitespace only'.format(line.lstrip())) + examples_errs = '' if not doc.examples: errs.append('No examples section found')
Add a check to validate the docstrings don't have tabs. The documentation uses whitespace only, adding the check will prevent tabs being added in the sprint or future submissions Sample output (elided non changed parts) ``` ... ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: ... Tabs found in the docstring, please use whitespace only ... ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20112
2018-03-10T11:55:59Z
2018-03-14T22:42:15Z
2018-03-14T22:42:15Z
2018-03-15T00:06:42Z
DOC: Improved the docstring of pandas.Series.sample
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..9186cb33e5b7a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3718,7 +3718,9 @@ def tail(self, n=5): def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None): """ - Returns a random sample of items from an axis of object. + Return a random sample of items from an axis of object. + + You can use `random_state` for reproducibility. Parameters ---------- @@ -3755,7 +3757,6 @@ def sample(self, n=None, frac=None, replace=False, weights=None, Examples -------- - Generate an example ``Series`` and ``DataFrame``: >>> s = pd.Series(np.random.randn(50)) @@ -3794,6 +3795,16 @@ def sample(self, n=None, frac=None, replace=False, weights=None, 40 0.823173 -0.078816 1.009536 1.015108 15 1.421154 -0.055301 -1.922594 -0.019696 6 -0.148339 0.832938 1.787600 -1.383767 + + You can use `random state` for reproducibility: + + >>> df.sample(random_state=1) + A B C D + 37 -2.027662 0.103611 0.237496 -0.165867 + 43 -0.259323 -0.583426 1.516140 -0.479118 + 12 -1.686325 -0.579510 0.985195 -0.460286 + 8 1.167946 0.429082 1.215742 -1.636041 + 9 1.197475 -0.864188 1.554031 -1.505264 """ if axis is None:
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ####################### Docstring (pandas.Series.sample) ####################### ################################################################################ Return a random sample of items from an axis of object. You can use `random state` for reproducibility Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : boolean, optional Sample with or without replacement. Default = False. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. inf and -inf values not allowed. random_state : int or numpy.random.RandomState, optional Seed for the random number generator (if int), or numpy RandomState object. 
axis : int or string, optional Axis to sample. Accepts axis number or name. Default is stat axis for given data type (0 for Series and DataFrames, 1 for Panels). Returns ------- A new object of same type as caller. See Also -------- Series.sample : Returns a random sample of items from an axis of object. DataFrame.sample : Returns a random sample of items from an axis of object. Panel.sample : Returns a random sample of items from an axis of object. Examples -------- Generate an example ``Series`` and ``DataFrame``: >>> s = pd.Series(np.random.randn(50)) >>> s.head() 0 -0.038497 1 1.820773 2 -0.972766 3 -1.598270 4 -1.095526 dtype: float64 >>> df = pd.DataFrame(np.random.randn(50, 4), columns=list('ABCD')) >>> df.head() A B C D 0 0.016443 -2.318952 -0.566372 -1.028078 1 -1.051921 0.438836 0.658280 -0.175797 2 -1.243569 -0.364626 -0.215065 0.057736 3 1.768216 0.404512 -0.385604 -1.457834 4 1.072446 -1.137172 0.314194 -0.046661 Next extract a random sample from both of these objects... 3 random elements from the ``Series``: >>> s.sample(n=3) 27 -0.994689 55 -1.049016 67 -0.224565 dtype: float64 And a random 10% of the ``DataFrame`` with replacement: >>> df.sample(frac=0.1, replace=True) A B C D 35 1.981780 0.142106 1.817165 -0.290805 49 -1.336199 -0.448634 -0.789640 0.217116 40 0.823173 -0.078816 1.009536 1.015108 15 1.421154 -0.055301 -1.922594 -0.019696 6 -0.148339 0.832938 1.787600 -1.383767 ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Examples do not pass tests ################################################################################ ################################### Doctests ################################### ################################################################################ 
********************************************************************** Line 53, in pandas.Series.sample Failed example: s.head() Expected: 0 -0.038497 1 1.820773 2 -0.972766 3 -1.598270 4 -1.095526 dtype: float64 Got: 0 -0.316288 1 -0.109803 2 0.398450 3 -0.307658 4 -0.210365 dtype: float64 ********************************************************************** Line 61, in pandas.Series.sample Failed example: df.head() Expected: A B C D 0 0.016443 -2.318952 -0.566372 -1.028078 1 -1.051921 0.438836 0.658280 -0.175797 2 -1.243569 -0.364626 -0.215065 0.057736 3 1.768216 0.404512 -0.385604 -1.457834 4 1.072446 -1.137172 0.314194 -0.046661 Got: A B C D 0 0.374238 -0.608431 -0.126340 -0.764207 1 0.433942 0.576081 -0.704511 1.708611 2 1.145009 -0.051829 -0.614948 -0.458692 3 0.153273 -0.692912 -0.200969 -0.725891 4 0.780466 0.616172 2.143758 -2.081198 ********************************************************************** Line 73, in pandas.Series.sample Failed example: s.sample(n=3) Expected: 27 -0.994689 55 -1.049016 67 -0.224565 dtype: float64 Got: 20 1.077020 41 -0.847340 11 -1.567316 dtype: float64 ********************************************************************** Line 81, in pandas.Series.sample Failed example: df.sample(frac=0.1, replace=True) Expected: A B C D 35 1.981780 0.142106 1.817165 -0.290805 49 -1.336199 -0.448634 -0.789640 0.217116 40 0.823173 -0.078816 1.009536 1.015108 15 1.421154 -0.055301 -1.922594 -0.019696 6 -0.148339 0.832938 1.787600 -1.383767 Got: A B C D 7 0.663274 0.980879 -0.290907 -0.063392 7 0.663274 0.980879 -0.290907 -0.063392 37 2.074749 -0.062022 -0.766187 -0.501413 36 -0.315902 0.125332 -1.271485 -1.619816 44 1.438970 -1.112939 0.386373 0.828501 ``` The validation errors are correct because sample requires a randomic and unpredictable output. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint):
https://api.github.com/repos/pandas-dev/pandas/pulls/20109
2018-03-10T11:49:35Z
2018-03-15T14:11:33Z
2018-03-15T14:11:33Z
2018-03-15T14:12:19Z
DOC: update the Series.reset_index DocString
diff --git a/pandas/core/series.py b/pandas/core/series.py index 069f0372ab6e1..acc96487adb40 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1002,55 +1002,112 @@ def _set_value(self, label, value, takeable=False): def reset_index(self, level=None, drop=False, name=None, inplace=False): """ - Analogous to the :meth:`pandas.DataFrame.reset_index` function, see - docstring there. + Generate a new DataFrame or Series with the index reset. + + This is useful when the index needs to be treated as a column, or + when the index is meaningless and needs to be reset to the default + before another operation. Parameters ---------- - level : int, str, tuple, or list, default None - Only remove the given levels from the index. Removes all levels by - default - drop : boolean, default False - Do not try to insert index into dataframe columns - name : object, default None - The name of the column corresponding to the Series values - inplace : boolean, default False - Modify the Series in place (do not create a new object) + level : int, str, tuple, or list, default optional + For a Series with a MultiIndex, only remove the specified levels + from the index. Removes all levels by default. + drop : bool, default False + Just reset the index, without inserting it as a column in + the new DataFrame. + name : object, optional + The name to use for the column containing the original Series + values. Uses ``self.name`` by default. This argument is ignored + when `drop` is True. + inplace : bool, default False + Modify the Series in place (do not create a new object). Returns - ---------- - resetted : DataFrame, or Series if drop == True + ------- + Series or DataFrame + When `drop` is False (the default), a DataFrame is returned. + The newly created columns will come first in the DataFrame, + followed by the original Series values. + When `drop` is True, a `Series` is returned. + In either case, if ``inplace=True``, no value is returned. 
+ + See Also + -------- + DataFrame.reset_index: Analogous function for DataFrame. Examples -------- - >>> s = pd.Series([1, 2, 3, 4], index=pd.Index(['a', 'b', 'c', 'd'], - ... name = 'idx')) + + >>> s = pd.Series([1, 2, 3, 4], name='foo', + ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) + + Generate a DataFrame with default index. + >>> s.reset_index() - idx 0 - 0 a 1 - 1 b 2 - 2 c 3 - 3 d 4 - - >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', - ... 'foo', 'qux', 'qux']), - ... np.array(['one', 'two', 'one', 'two', 'one', 'two', - ... 'one', 'two'])] + idx foo + 0 a 1 + 1 b 2 + 2 c 3 + 3 d 4 + + To specify the name of the new column use `name`. + + >>> s.reset_index(name='values') + idx values + 0 a 1 + 1 b 2 + 2 c 3 + 3 d 4 + + To generate a new Series with the default set `drop` to True. + + >>> s.reset_index(drop=True) + 0 1 + 1 2 + 2 3 + 3 4 + Name: foo, dtype: int64 + + To update the Series in place, without generating a new one + set `inplace` to True. Note that it also requires ``drop=True``. + + >>> s.reset_index(inplace=True, drop=True) + >>> s + 0 1 + 1 2 + 2 3 + 3 4 + Name: foo, dtype: int64 + + The `level` parameter is interesting for Series with a multi-level + index. + + >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), + ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( - ... range(8), + ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) + + To remove a specific level from the Index, use `level`. + >>> s2.reset_index(level='a') - a 0 + a foo b - one bar 0 - two bar 1 - one baz 2 - two baz 3 - one foo 4 - two foo 5 - one qux 6 - two qux 7 + one bar 0 + two bar 1 + one baz 2 + two baz 3 + + If `level` is not set, all levels are removed from the Index. + + >>> s2.reset_index() + a b foo + 0 bar one 0 + 1 bar two 1 + 2 baz one 2 + 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, 'inplace') if drop:
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` # paste output of "scripts/validate_docstrings.py <your-function-or-method>" here # between the "```" (remove this comment, but keep the "```") ################################################################################ ############## Docstring (pandas.core.series.Series.reset_index) ############## ################################################################################ Reset the index of the Serie. For a Serie with multi-level index, return a new Serie with labeling information in the columns under the index names, defaulting to ‘level_0’, ‘level_1’, etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default ‘index’ or ‘level_0’ (if ‘index’ is already taken) will be used. Parameters ---------- level : `int`, `str`, `tuple`, or `list`, default `None` Only remove the given levels from the index. Removes all levels by default. drop : `boolean`, default `False` Do not try to insert index into dataframe columns. name : `object`, default `None` The name of the column corresponding to the Series values. inplace : `boolean`, default `False` Modify the Series in place (do not create a new object). 
Returns ---------- resetted : `DataFrame`, or Series if `drop == True` See Also -------- :meth:`pandas.DataFrame.reset_index`: Analogous funciton for DataFrame Examples -------- >>> s = pd.Series([1, 2, 3, 4], index=pd.Index(['a', 'b', 'c', 'd'], ... name = 'idx')) >>> s.reset_index() idx 0 0 a 1 1 b 2 2 c 3 3 d 4 >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', ... 'foo', 'qux', 'qux']), ... np.array(['one', 'two', 'one', 'two', 'one', 'two', ... 'one', 'two'])] >>> s2 = pd.Series( ... range(8), ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) >>> s2.reset_index(level='a') a 0 b one bar 0 two bar 1 one baz 2 two baz 3 one foo 4 two foo 5 one qux 6 two qux 7 ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.core.series.Series.reset_index" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20107
2018-03-10T11:11:47Z
2018-03-15T22:03:08Z
2018-03-15T22:03:08Z
2018-03-15T22:03:27Z
DOC: update the MultiIndex.swaplevel docstring
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 73f4aee1c4880..7cd7e8dd274df 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1775,22 +1775,45 @@ def droplevel(self, level=0): def swaplevel(self, i=-2, j=-1): """ - Swap level i with level j. Do not change the ordering of anything + Swap level i with level j. + + Calling this method does not change the ordering of the values. Parameters ---------- - i, j : int, string (can be mixed) - Level of index to be swapped. Can pass level name as string. + i : int, str, default -2 + First level of index to be swapped. Can pass level name as string. + Type of parameters can be mixed. + j : int, str, default -1 + Second level of index to be swapped. Can pass level name as string. + Type of parameters can be mixed. Returns ------- - swapped : MultiIndex + MultiIndex + A new MultiIndex .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. + See Also + -------- + Series.swaplevel : Swap levels i and j in a MultiIndex + Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a + particular axis + + Examples + -------- + >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], + ... labels=[[0, 0, 1, 1], [0, 1, 0, 1]]) + >>> mi + MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], + labels=[[0, 0, 1, 1], [0, 1, 0, 1]]) + >>> mi.swaplevel(0, 1) + MultiIndex(levels=[['bb', 'aa'], ['a', 'b']], + labels=[[0, 1, 0, 1], [0, 0, 1, 1]]) """ new_levels = list(self.levels) new_labels = list(self.labels)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################### Docstring (pandas.MultiIndex.swaplevel) ################### ################################################################################ Swap level i with level j. Calling this method does not change the ordering of anything. Parameters ---------- i : int, str First level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. j : int, str Second level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. Returns ------- MultiIndex A newly allocated MultiIndex .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. See Also -------- MultiIndex : A multi-level, or hierarchical, index object for pandas objects. Examples -------- >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], ... 
labels=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], labels=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi.swaplevel() MultiIndex(levels=[['bb', 'aa'], ['a', 'b']], labels=[[0, 1, 0, 1], [0, 0, 1, 1]]) ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.MultiIndex.swaplevel" correct. : ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20105
2018-03-10T10:22:46Z
2018-03-10T14:49:31Z
2018-03-10T14:49:31Z
2018-03-10T14:49:41Z
DOC: Update pandas.cut docstring
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 30132ddc05c40..be28f7091712f 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -26,69 +26,133 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, include_lowest=False): """ - Return indices of half-open bins to which each value of `x` belongs. + Bin values into discrete intervals. + + Use `cut` when you need to segment and sort data values into bins. This + function is also useful for going from a continuous variable to a + categorical variable. For example, `cut` could convert ages to groups of + age ranges. Supports binning into an equal number of bins, or a + pre-specified array of bins. Parameters ---------- x : array-like - Input array to be binned. It has to be 1-dimensional. - bins : int, sequence of scalars, or IntervalIndex - If `bins` is an int, it defines the number of equal-width bins in the - range of `x`. However, in this case, the range of `x` is extended - by .1% on each side to include the min or max values of `x`. If - `bins` is a sequence it defines the bin edges allowing for - non-uniform bin width. No extension of the range of `x` is done in - this case. - right : bool, optional - Indicates whether the bins include the rightmost edge or not. If - right == True (the default), then the bins [1,2,3,4] indicate - (1,2], (2,3], (3,4]. - labels : array or boolean, default None - Used as labels for the resulting bins. Must be of the same length as - the resulting bins. If False, return only integer indicators of the - bins. - retbins : bool, optional - Whether to return the bins or not. Can be useful if bins is given + The input array to be binned. Must be 1-dimensional. + bins : int, sequence of scalars, or pandas.IntervalIndex + The criteria to bin by. + + * int : Defines the number of equal-width bins in the range of `x`. The + range of `x` is extended by .1% on each side to include the minimum + and maximum values of `x`. 
+ * sequence of scalars : Defines the bin edges allowing for non-uniform + width. No extension of the range of `x` is done. + * IntervalIndex : Defines the exact bins to be used. + + right : bool, default True + Indicates whether `bins` includes the rightmost edge or not. If + ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` + indicate (1,2], (2,3], (3,4]. This argument is ignored when + `bins` is an IntervalIndex. + labels : array or bool, optional + Specifies the labels for the returned bins. Must be the same length as + the resulting bins. If False, returns only integer indicators of the + bins. This affects the type of the output container (see below). + This argument is ignored when `bins` is an IntervalIndex. + retbins : bool, default False + Whether to return the bins or not. Useful when bins is provided as a scalar. - precision : int, optional - The precision at which to store and display the bins labels - include_lowest : bool, optional + precision : int, default 3 + The precision at which to store and display the bins labels. + include_lowest : bool, default False Whether the first interval should be left-inclusive or not. Returns ------- - out : Categorical or Series or array of integers if labels is False - The return type (Categorical or Series) depends on the input: a Series - of type category if input is a Series else Categorical. Bins are - represented as categories when categorical data is returned. - bins : ndarray of floats - Returned only if `retbins` is True. + out : pandas.Categorical, Series, or ndarray + An array-like object representing the respective bin for each value + of `x`. The type depends on the value of `labels`. - Notes - ----- - The `cut` function can be useful for going from a continuous variable to - a categorical variable. For example, `cut` could convert ages to groups - of age ranges. + * True (default) : returns a Series for Series `x` or a + pandas.Categorical for all other inputs. 
The values stored within + are Interval dtype. - Any NA values will be NA in the result. Out of bounds values will be NA in - the resulting Categorical object + * sequence of scalars : returns a Series for Series `x` or a + pandas.Categorical for all other inputs. The values stored within + are whatever the type in the sequence is. + * False : returns an ndarray of integers. + + bins : numpy.ndarray or IntervalIndex. + The computed or specified bins. Only returned when `retbins=True`. + For scalar or sequence `bins`, this is an ndarray with the computed + bins. For an IntervalIndex `bins`, this is equal to `bins`. + + See Also + -------- + qcut : Discretize variable into equal-sized buckets based on rank + or based on sample quantiles. + pandas.Categorical : Array type for storing data that come from a + fixed set of values. + Series : One-dimensional array with axis labels (including time series). + pandas.IntervalIndex : Immutable Index implementing an ordered, + sliceable set. + + Notes + ----- + Any NA values will be NA in the result. Out of bounds values will be NA in + the resulting Series or pandas.Categorical object. Examples -------- - >>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True) + Discretize into three equal-sized bins. + + >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3) ... # doctest: +ELLIPSIS - ([(0.19, 3.367], (0.19, 3.367], (0.19, 3.367], (3.367, 6.533], ... - Categories (3, interval[float64]): [(0.19, 3.367] < (3.367, 6.533] ... + [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... + Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... - >>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), - ... 3, labels=["good", "medium", "bad"]) - ... # doctest: +SKIP - [good, good, good, medium, bad, good] - Categories (3, object): [good < medium < bad] + >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True) + ... # doctest: +ELLIPSIS + ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... 
+ Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... + array([0.994, 3. , 5. , 7. ])) + + Discovers the same bins, but assign them specific labels. Notice that + the returned Categorical's categories are `labels` and is ordered. + + >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), + ... 3, labels=["bad", "medium", "good"]) + [bad, good, medium, medium, good, bad] + Categories (3, object): [bad < medium < good] - >>> pd.cut(np.ones(5), 4, labels=False) - array([1, 1, 1, 1, 1]) + ``labels=False`` implies you just want the bins back. + + >>> pd.cut([0, 1, 1, 2], bins=4, labels=False) + array([0, 1, 1, 3]) + + Passing a Series as an input returns a Series with categorical dtype: + + >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), + ... index=['a', 'b', 'c', 'd', 'e']) + >>> pd.cut(s, 3) + ... # doctest: +ELLIPSIS + a (1.992, 4.667] + b (1.992, 4.667] + c (4.667, 7.333] + d (7.333, 10.0] + e (7.333, 10.0] + dtype: category + Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ... + + Passing an IntervalIndex for `bins` results in those categories exactly. + Notice that values not covered by the IntervalIndex are set to NaN. 0 + is to the left of the first bin (which is closed on the right), and 1.5 + falls between two bins. + + >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) + >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) + [NaN, (0, 1], NaN, (2, 3], (4, 5]] + Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]] """ # NOTE: this binning code is changed a bit from histogram for var(x) == 0
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X ] PR title is "DOC: update the <your-function-or-method> docstring" - [X ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X ] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X ] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X ] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ############################ Docstring (pandas.cut) ############################ ################################################################################ Bin `x` and return data about the bin to which each `x` value belongs. Splits `x` into the specified number of equal-width half-open bins. Based on the parameters specified and the input, returns data about the half-open bins to which each value of `x` belongs or the bins themselves. Use `cut` when you need to segment and sort data values into bins. This function is also useful for going from a continuous variable to a categorical variable. For example, `cut` could convert ages to groups of age ranges. Parameters ---------- x : array-like The input array to be binned. Must be 1-dimensional. bins : int, sequence of scalars, or pandas.IntervalIndex If int, defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the min or max values of `x`. If a sequence, defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. right : bool, default 'True' Indicates whether the `bins` include the rightmost edge or not. If `right == True` (the default), then the `bins` [1,2,3,4] indicate (1,2], (2,3], (3,4]. 
labels : array or bool, optional Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. retbins : bool, default 'False' Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default '3' The precision at which to store and display the bins labels. include_lowest : bool, default 'False' Whether the first interval should be left-inclusive or not. Returns ------- out : pandas.Categorical, Series, or ndarray An array-like object representing the respective bin for each value of `x`. The type depends on the value of `labels`. * True : returns a Series for Series `x` or a pandas.Categorical for pandas.Categorial `x`. * False : returns an ndarray of integers. bins : numpy.ndarray of floats Returned when `retbins` is 'True'. See Also -------- qcut : Discretize variable into equal-sized buckets based on rank or based on sample quantiles. pandas.Categorical : Represents a categorical variable in classic R / S-plus fashion. Series : One-dimensional ndarray with axis labels (including time series). pandas.IntervalIndex : Immutable Index implementing an ordered, sliceable set. IntervalIndex represents an Index of intervals that are all closed on the same side. Notes ----- Any NA values will be NA in the result. Out of bounds values will be NA in the resulting pandas.Categorical object. Examples -------- >>> pd.cut(np.array([1,7,5,4,6,3]), 3) ... # doctest: +ELLIPSIS [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... >>> pd.cut(np.array([1,7,5,4,6,3]), 3, retbins=True) ... # doctest: +ELLIPSIS ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... array([0.994, 3. , 5. , 7. ])) >>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), ... 3, labels=["good", "medium", "bad"]) ... 
# doctest: +SKIP [good, good, good, medium, bad, good] Categories (3, object): [good < medium < bad] >>> pd.cut(np.ones(5, dtype='int64'), 4, labels=False) array([1, 1, 1, 1, 1], dtype=int64) >>> s = pd.Series(np.array([2,4,6,8,10]), index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, 3) ... # doctest: +ELLIPSIS a (1.992, 4.667] b (1.992, 4.667] c (4.667, 7.333] d (7.333, 10.0] e (7.333, 10.0] dtype: category Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ... ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.cut" correct. :) ``` Comment: Resubmitting https://github.com/pandas-dev/pandas/pull/20069 after a botched rebase.
https://api.github.com/repos/pandas-dev/pandas/pulls/20104
2018-03-10T10:08:23Z
2018-03-16T11:10:27Z
2018-03-16T11:10:26Z
2018-06-09T11:34:27Z
DOC: Update the DatetimeIndex.strftime docstring
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index e673bfe411cb4..dae5324da41be 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -61,6 +61,8 @@ def strftime(self, date_format): return np.asarray(self.format(date_format=date_format), dtype=compat.text_type) strftime.__doc__ = """ + Convert to string array using specified date_format. + Return an array of formatted strings specified by date_format, which supports the same string format as the python standard library. Details of the string format can be found in `python string format doc <{0}>`__ @@ -68,11 +70,32 @@ def strftime(self, date_format): Parameters ---------- date_format : str - date format string (e.g. "%Y-%m-%d") + Date format string (e.g. "%Y-%m-%d"). Returns ------- - ndarray of formatted strings + numpy.ndarray + NumPy array of formatted strings + + See Also + -------- + pandas.to_datetime : Convert the given argument to datetime + DatetimeIndex.normalize : Return DatetimeIndex with times to midnight. + DatetimeIndex.round : Round the DatetimeIndex to the specified freq. + DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq. + + Examples + -------- + >>> import datetime + >>> data = pd.date_range(datetime.datetime(2018,3,10,19,27,52), + ... periods=4, freq='B') + >>> df = pd.DataFrame(data, columns=['date']) + >>> df.date[1] + Timestamp('2018-03-13 19:27:52') + >>> df.date[1].strftime('%d-%m-%Y') + '13-03-2018' + >>> df.date[1].strftime('%B %d, %Y, %r') + 'March 13, 2018, 07:27:52 PM' """.format("https://docs.python.org/3/library/datetime.html" "#strftime-and-strptime-behavior")
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [ ] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [ ] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [ ] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################## Docstring (pandas.DatetimeIndex.strftime) ################## ################################################################################ Convert to specified date_format. Return an array of formatted strings specified by date_format, which supports the same string format as the python standard library. Details of the string format can be found in `python string format doc <https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior>`__ Parameters ---------- date_format : str Date format string (e.g. "%Y-%m-%d"). Returns ------- numpy.ndarray n-dimensional array of formatted strings See Also -------- DatetimeIndex.normalize : Return DatetimeIndex with times to midnight. DatetimeIndex.round : Round the DatetimeIndex to the specified freq. DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq. 
Examples -------- >>> data = ['2015-05-01 18:47:05.060000','2014-05-01 18:47:05.110000'] >>> df = pd.DataFrame(data, columns=['date']) >>> df.date = pd.to_datetime(df.date) >>> df.date[1] Timestamp('2014-05-01 18:47:05.110000') >>> df.date[1].strftime('%d-%m-%Y') '01-05-2014' >>> df.date[1].strftime('%B %d, %Y, %r') 'May 01, 2014, 06:47:05 PM' ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DatetimeIndex.strftime" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint):
https://api.github.com/repos/pandas-dev/pandas/pulls/20103
2018-03-10T09:30:58Z
2018-03-13T23:19:24Z
2018-03-13T23:19:24Z
2018-03-13T23:19:29Z
DOC: update the pd.DataFrame.memory_usage/empty docstring (Seoul)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a66d00fff9714..af3d5a0f93cce 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1943,32 +1943,88 @@ def _sizeof_fmt(num, size_qualifier): _put_lines(buf, lines) def memory_usage(self, index=True, deep=False): - """Memory usage of DataFrame columns. + """ + Return the memory usage of each column in bytes. + + The memory usage can optionally include the contribution of + the index and elements of `object` dtype. + + This value is displayed in `DataFrame.info` by default. This can be + suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- - index : bool - Specifies whether to include memory usage of DataFrame's - index in returned Series. If `index=True` (default is False) - the first index of the Series is `Index`. - deep : bool - Introspect the data deeply, interrogate - `object` dtypes for system-level memory consumption + index : bool, default True + Specifies whether to include the memory usage of the DataFrame's + index in returned Series. If ``index=True`` the memory usage of the + index the first item in the output. + deep : bool, default False + If True, introspect the data deeply by interrogating + `object` dtypes for system-level memory consumption, and include + it in the returned values. Returns ------- sizes : Series - A series with column names as index and memory usage of - columns with units of bytes. - - Notes - ----- - Memory usage does not include memory consumed by elements that - are not components of the array if deep=False + A Series whose index is the original column names and whose values + is the memory usage of each column in bytes. See Also -------- - numpy.ndarray.nbytes + numpy.ndarray.nbytes : Total bytes consumed by the elements of an + ndarray. + Series.memory_usage : Bytes consumed by a Series. + pandas.Categorical : Memory-efficient array for string values with + many repeated values. 
+ DataFrame.info : Concise summary of a DataFrame. + + Examples + -------- + >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] + >>> data = dict([(t, np.ones(shape=5000).astype(t)) + ... for t in dtypes]) + >>> df = pd.DataFrame(data) + >>> df.head() + int64 float64 complex128 object bool + 0 1 1.0 (1+0j) 1 True + 1 1 1.0 (1+0j) 1 True + 2 1 1.0 (1+0j) 1 True + 3 1 1.0 (1+0j) 1 True + 4 1 1.0 (1+0j) 1 True + + >>> df.memory_usage() + Index 80 + int64 40000 + float64 40000 + complex128 80000 + object 40000 + bool 5000 + dtype: int64 + + >>> df.memory_usage(index=False) + int64 40000 + float64 40000 + complex128 80000 + object 40000 + bool 5000 + dtype: int64 + + The memory footprint of `object` dtype columns is ignored by default: + + >>> df.memory_usage(deep=True) + Index 80 + int64 40000 + float64 40000 + complex128 80000 + object 160000 + bool 5000 + dtype: int64 + + Use a Categorical for efficient storage of an object-dtype column with + many repeated values. + + >>> df['object'].astype('category').memory_usage(deep=True) + 5168 """ result = Series([c.memory_usage(index=False, deep=deep) for col, c in self.iteritems()], index=self.columns) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..4a2698290166f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1436,12 +1436,20 @@ def __contains__(self, key): @property def empty(self): - """True if NDFrame is entirely empty [no items], meaning any of the + """ + Indicator whether DataFrame is empty. + + True if DataFrame is entirely empty (no items), meaning any of the axes are of length 0. + Returns + ------- + bool + If DataFrame is empty, return True, if not return False. + Notes ----- - If NDFrame contains only NaNs, it is still not considered empty. See + If DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` # paste output of "scripts/validate_docstrings.py <your-function-or-method>" here # between the "```" (remove this comment, but keep the "```") ################################################################################ ###################### Docstring (pandas.DataFrame.empty) ###################### ################################################################################ True if DataFrame is empty. True if DataFrame is entirely empty [no items], meaning any of the axes are of length 0. Returns ------- empty : boolean if DataFrame is empty, return true, if not return false. Notes ----- If DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! 
We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True See also -------- pandas.Series.dropna pandas.DataFrame.dropna ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Missing description for See Also "pandas.Series.dropna" reference Missing description for See Also "pandas.DataFrame.dropna" reference ################################################################################ ################## Docstring (pandas.DataFrame.memory_usage) ################## ################################################################################ Memory usage of DataFrame columns. Memory usage of DataFrame is accessing pandas.DataFrame.info method. A configuration option, `display.memory_usage` (see Parameters) Parameters ---------- index : bool Specifies whether to include memory usage of DataFrame's index in returned Series. If `index=True` (default is False) the first index of the Series is `Index`. deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption. Returns ------- sizes : Series A series with column names as index and memory usage of columns with units of bytes. Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.random.randint(100, size=5000).astype(t)) ... 
for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.memory_usage() Index 80 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=True) Index 80 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=True).sum() 205080 ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Missing description for See Also "numpy.ndarray.nbytes" reference ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Lastly, I left errors already occurred in the previous version without changes.
https://api.github.com/repos/pandas-dev/pandas/pulls/20102
2018-03-10T08:54:45Z
2018-03-15T21:58:34Z
2018-03-15T21:58:34Z
2018-03-15T21:58:49Z
DOC: update the axes, shape, dim and size property docstring (Seoul)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a66d00fff9714..4b3db21dc87c0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -550,8 +550,17 @@ def _get_axes(N, K, index=index, columns=columns): @property def axes(self): """ - Return a list with the row axis labels and column axis labels as the - only members. They are returned in that order. + Return a list representing the axes of the DataFrame. + + It has the row axis labels and column axis labels as the only members. + They are returned in that order. + + Examples + -------- + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df.axes + [RangeIndex(start=0, stop=2, step=1), Index(['coll', 'col2'], + dtype='object')] """ return [self.index, self.columns] @@ -559,6 +568,21 @@ def axes(self): def shape(self): """ Return a tuple representing the dimensionality of the DataFrame. + + See Also + -------- + ndarray.shape + + Examples + -------- + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df.shape + (2, 2) + + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], + ... 'col3': [5, 6]}) + >>> df.shape + (2, 3) """ return len(self.index), len(self.columns) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..ff65c88970b86 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -457,12 +457,49 @@ def axes(self): @property def ndim(self): - """Number of axes / array dimensions""" + """ + Return an int representing the number of axes / array dimensions. + + Return 1 if Series. Otherwise return 2 if DataFrame. + + See Also + -------- + ndarray.ndim + + Examples + -------- + >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) + >>> s.ndim + 1 + + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df.ndim + 2 + """ return self._data.ndim @property def size(self): - """number of elements in the NDFrame""" + """ + Return an int representing the number of elements in this object. + + Return the number of rows if Series. 
Otherwise return the number of + rows times number of columns if DataFrame. + + See Also + -------- + ndarray.size + + Examples + -------- + >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) + >>> s.size + 3 + + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df.size + 4 + """ return np.prod(self.shape) @property
This change includes the following property axes, shape, ndim, size Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` # paste output of "scripts/validate_docstrings.py <your-function-or-method>" here # between the "```" (remove this comment, but keep the "```") $ scripts/validate_docstrings.py pandas.DataFrame.axes ################################################################################ ###################### Docstring (pandas.DataFrame.axes) ###################### ################################################################################ Return a list representing the row axis labels and column axis labels as the only members. They are returned in that order. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['coll', 'col2'], dtype='object')] ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No summary found (a short summary in a single line should be present at the beginning of the docstring) No returns section found See Also section not found Examples do not pass tests ################################################################################ ################################### Doctests ################################### ################################################################################ ********************************************************************** Line 8, in pandas.DataFrame.axes Failed example: df.axes Expected: [RangeIndex(start=0, stop=2, step=1), Index(['coll', 'col2'], dtype='object')] Got: [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] $ scripts/validate_docstrings.py pandas.DataFrame.ndim ################################################################################ ###################### Docstring (pandas.DataFrame.ndim) ###################### ################################################################################ Return an int representing the number of axes / array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 >>> df = pd.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6], ... 
'col3': [7, 8, 9]}) >>> df.ndim 2 ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No extended summary found No returns section found See Also section not found $ scripts/validate_docstrings.py pandas.DataFrame.size ################################################################################ ###################### Docstring (pandas.DataFrame.size) ###################### ################################################################################ Return a numpy.int64 representing the number of elements in this object. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 >>> df = pd.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6], ... 'col3': [7, 8, 9]}) >>> df.size 9 ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No summary found (a short summary in a single line should be present at the beginning of the docstring) No returns section found See Also section not found $ scripts/validate_docstrings.py pandas.DataFrame.shape ################################################################################ ###################### Docstring (pandas.DataFrame.shape) ###################### ################################################################################ Return a tuple representing the dimensionality of the DataFrame. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col0': [1, 2, 3], 'col2': [4, 5, 6], ... 
'col3': [7, 8, 9]}) >>> df.shape (3, 3) ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No extended summary found No returns section found See Also section not found ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Current change [1][2][3][4] occurred following error "No returns section found", "See Also section nod found". Because this function is property. so I did not make Return and Also section. And then axes[1] property occurred "Examples do not pass tests". I made a newline in the result to avoid flake8 error. It raises the above error. [1] pandas.DataFrame.axes [2] pandas.DataFrame.ndim [3] pandas.DataFrame.size [4] pandas.DataFrame.shape
https://api.github.com/repos/pandas-dev/pandas/pulls/20101
2018-03-10T08:33:35Z
2018-03-13T15:05:49Z
2018-03-13T15:05:49Z
2018-03-15T22:27:02Z
DOC: update the dtypes/ftypes docstring (Seoul)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..8fedaabca84a8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4275,7 +4275,36 @@ def get_ftype_counts(self): @property def dtypes(self): - """Return the dtypes in this object.""" + """ + Return the dtypes in the DataFrame. + + This returns a Series with the data type of each column. + The result's index is the original DataFrame's columns. Columns + with mixed types are stored with the ``object`` dtype. See + :ref:`the User Guide <basics.dtypes>` for more. + + Returns + ------- + pandas.Series + The data type of each column. + + See Also + -------- + pandas.DataFrame.ftypes : dtype and sparsity information. + + Examples + -------- + >>> df = pd.DataFrame({'float': [1.0], + ... 'int': [1], + ... 'datetime': [pd.Timestamp('20180310')], + ... 'string': ['foo']}) + >>> df.dtypes + float float64 + int int64 + datetime datetime64[ns] + string object + dtype: object + """ from pandas import Series return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_) @@ -4283,8 +4312,45 @@ def dtypes(self): @property def ftypes(self): """ - Return the ftypes (indication of sparse/dense and dtype) - in this object. + Return the ftypes (indication of sparse/dense and dtype) in DataFrame. + + This returns a Series with the data type of each column. + The result's index is the original DataFrame's columns. Columns + with mixed types are stored with the ``object`` dtype. See + :ref:`the User Guide <basics.dtypes>` for more. + + Returns + ------- + pandas.Series + The data type and indication of sparse/dense of each column. + + See Also + -------- + pandas.DataFrame.dtypes: Series with just dtype information. + pandas.SparseDataFrame : Container for sparse tabular data. + + Notes + ----- + Sparse data should have the same dtypes as its dense representation. 
+ + Examples + -------- + >>> import numpy as np + >>> arr = np.random.RandomState(0).randn(100, 4) + >>> arr[arr < .8] = np.nan + >>> pd.DataFrame(arr).ftypes + 0 float64:dense + 1 float64:dense + 2 float64:dense + 3 float64:dense + dtype: object + + >>> pd.SparseDataFrame(arr).ftypes + 0 float64:sparse + 1 float64:sparse + 2 float64:sparse + 3 float64:sparse + dtype: object """ from pandas import Series return Series(self._data.get_ftypes(), index=self._info_axis,
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` # paste output of "scripts/validate_docstrings.py <your-function-or-method>" here # between the "```" (remove this comment, but keep the "```") ################################################################################ ##################### Docstring (pandas.DataFrame.dtypes) ##################### ################################################################################ Return the dtypes in this object. Notes ----- It returns a Series with the data type of each column. If object contains data multiple dtypes in a single column, dtypes will be chosen to accommodate all of the data types. ``object`` is the most general. Examples -------- >>> df = pd.DataFrame({'f': pd.np.random.rand(3), ... 'i': 1, ... 'd': pd.Timestamp('20180310'), ... 
'o': 'foo'}) >>> df.dtypes f float64 i int64 d datetime64[ns] o object dtype: object ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No extended summary found No returns section found See Also section not found ################################################################################ ##################### Docstring (pandas.DataFrame.ftypes) ##################### ################################################################################ Return the ftypes (indication of sparse/dense and dtype) in this object. Notes ----- Sparse data should have the same dtypes as its dense representation See Also -------- dtypes, SparseDataFrame Examples -------- >>> arr = pd.np.random.randn(100, 4) >>> arr[arr < .8] = pd.np.nan >>> pd.DataFrame(arr).ftypes 0 float64:dense 1 float64:dense 2 float64:dense 3 float64:dense dtype: object >>> pd.SparseDataFrame(arr).ftypes 0 float64:sparse 1 float64:sparse 2 float64:sparse 3 float64:sparse dtype: object ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No summary found (a short summary in a single line should be present at the beginning of the docstring) No returns section found Missing description for See Also "dtypes" reference Missing description for See Also "SparseDataFrame" reference ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Lastly, I left errors already occurred in the previous version without changes.
https://api.github.com/repos/pandas-dev/pandas/pulls/20100
2018-03-10T08:32:45Z
2018-03-12T21:11:07Z
2018-03-12T21:11:07Z
2018-03-12T21:11:07Z
DOC: update the docstring for several functions and properties (Seoul).
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a66d00fff9714..7092887975727 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2432,8 +2432,8 @@ def eval(self, expr, inplace=False, **kwargs): return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None): - """Return a subset of a DataFrame including/excluding columns based on - their ``dtype``. + """ + Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- @@ -2471,25 +2471,27 @@ def select_dtypes(self, include=None, exclude=None): Examples -------- - >>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'), + >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df a b c - 0 0.3962 True 1.0 - 1 0.1459 False 2.0 - 2 0.2623 True 1.0 - 3 0.0764 False 2.0 - 4 -0.9703 True 1.0 - 5 -1.2094 False 2.0 + 0 1 True 1.0 + 1 2 False 2.0 + 2 1 True 1.0 + 3 2 False 2.0 + 4 1 True 1.0 + 5 2 False 2.0 + >>> df.select_dtypes(include='bool') - c + b 0 True 1 False 2 True 3 False 4 True 5 False + >>> df.select_dtypes(include=['float64']) c 0 1.0 @@ -2498,14 +2500,15 @@ def select_dtypes(self, include=None, exclude=None): 3 2.0 4 1.0 5 2.0 - >>> df.select_dtypes(exclude=['floating']) - b - 0 True - 1 False - 2 True - 3 False - 4 True - 5 False + + >>> df.select_dtypes(exclude=['int']) + b c + 0 True 1.0 + 1 False 2.0 + 2 True 1.0 + 3 False 2.0 + 4 True 1.0 + 5 False 2.0 """ if not is_list_like(include): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..0074665505fee 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4260,16 +4260,116 @@ def _get_values(self): return self.values def get_values(self): - """same as values (but handles sparseness conversions)""" + """ + Return an ndarray after converting sparse values to dense. + + This is the same as ``.values`` for non-sparse data. 
For sparse + data contained in a `pandas.SparseArray`, the data are first + converted to a dense representation. + + Returns + ------- + numpy.ndarray + Numpy representation of DataFrame + + See Also + -------- + values : Numpy representation of DataFrame. + pandas.SparseArray : Container for sparse data. + + Examples + -------- + >>> df = pd.DataFrame({'a': [1, 2], 'b': [True, False], + ... 'c': [1.0, 2.0]}) + >>> df + a b c + 0 1 True 1.0 + 1 2 False 2.0 + + >>> df.get_values() + array([[1, True, 1.0], [2, False, 2.0]], dtype=object) + + >>> df = pd.DataFrame({"a": pd.SparseArray([1, None, None]), + ... "c": [1.0, 2.0, 3.0]}) + >>> df + a c + 0 1.0 1.0 + 1 NaN 2.0 + 2 NaN 3.0 + + >>> df.get_values() + array([[ 1., 1.], + [nan, 2.], + [nan, 3.]]) + """ return self.values def get_dtype_counts(self): - """Return the counts of dtypes in this object.""" + """ + Return counts of unique dtypes in this object. + + Returns + ------- + dtype : Series + Series with the count of columns with each dtype. + + See Also + -------- + dtypes : Return the dtypes in this object. + + Examples + -------- + >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]] + >>> df = pd.DataFrame(a, columns=['str', 'int', 'float']) + >>> df + str int float + 0 a 1 1.0 + 1 b 2 2.0 + 2 c 3 3.0 + + >>> df.get_dtype_counts() + float64 1 + int64 1 + object 1 + dtype: int64 + """ from pandas import Series return Series(self._data.get_dtype_counts()) def get_ftype_counts(self): - """Return the counts of ftypes in this object.""" + """ + Return counts of unique ftypes in this object. + + This is useful for SparseDataFrame or for DataFrames containing + sparse arrays. + + Returns + ------- + dtype : Series + Series with the count of columns with each type and + sparsity (dense/sparse) + + See Also + -------- + ftypes : Return ftypes (indication of sparse/dense and dtype) in + this object. 
+ + Examples + -------- + >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]] + >>> df = pd.DataFrame(a, columns=['str', 'int', 'float']) + >>> df + str int float + 0 a 1 1.0 + 1 b 2 2.0 + 2 c 3 3.0 + + >>> df.get_ftype_counts() + float64:dense 1 + int64:dense 1 + object:dense 1 + dtype: int64 + """ from pandas import Series return Series(self._data.get_ftype_counts())
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` `pd.DataFrame.get_dtype_counts` ################################################################################ ################ Docstring (pandas.DataFrame.get_dtype_counts) ################ ################################################################################ Return counts of unique dtypes in this object. Returns ------- dtype Number of dtype See Also -------- dtypes : Return the dtypes in this object. 
Examples -------- >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]] >>> df = pd.DataFrame(a, columns=['str', 'int', 'float']) >>> df['int'].astype(int) >>> df['float'].astype(float) >>> df.get_dtype_counts() float64 1 int64 1 object 1 dtype: int64 ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No extended summary found Examples do not pass tests ################################################################################ ################################### Doctests ################################### ################################################################################ ********************************************************************** Line 16, in pandas.DataFrame.get_dtype_counts Failed example: df['int'].astype(int) Expected nothing Got: 0 1 1 2 2 3 Name: int, dtype: int64 ********************************************************************** Line 17, in pandas.DataFrame.get_dtype_counts Failed example: df['float'].astype(float) Expected nothing Got: 0 1.0 1 2.0 2 3.0 Name: float, dtype: float64 `pd.DataFrame.get_ftype_counts` ################################################################################ ################ Docstring (pandas.DataFrame.get_ftype_counts) ################ ################################################################################ Return counts of unique ftypes in this object. Returns ------- dtype Number of dtype:dense|sparse See Also -------- ftypes : Return ftypes (indication of sparse/dense and dtype) in this object. 
Examples -------- >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]] >>> df = pd.DataFrame(a, columns=['str', 'int', 'float']) >>> df['int'].astype(int) >>> df['float'].astype(float) >>> df.get_dtype_counts() float64:dense 1 int64:dense 1 object:dense 1 dtype: int64 ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No extended summary found Examples do not pass tests ################################################################################ ################################### Doctests ################################### ################################################################################ ********************************************************************** Line 17, in pandas.DataFrame.get_ftype_counts Failed example: df['int'].astype(int) Expected nothing Got: 0 1 1 2 2 3 Name: int, dtype: int64 ********************************************************************** Line 18, in pandas.DataFrame.get_ftype_counts Failed example: df['float'].astype(float) Expected nothing Got: 0 1.0 1 2.0 2 3.0 Name: float, dtype: float64 ********************************************************************** Line 19, in pandas.DataFrame.get_ftype_counts Failed example: df.get_dtype_counts() Expected: float64:dense 1 int64:dense 1 object:dense 1 dtype: int64 Got: float64 1 int64 1 object 1 dtype: int64 `pd.DataFrame.select_dtypes` ################################################################################ ################## Docstring (pandas.DataFrame.select_dtypes) ################## ################################################################################ Return a subset of a DataFrame including/excluding columns based on their ``dtype``. 
Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. Returns ------- subset : DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'), ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 0.3962 True 1.0 1 0.1459 False 2.0 2 0.2623 True 1.0 3 0.0764 False 2.0 4 -0.9703 True 1.0 5 -1.2094 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['floating']) b 0 True 1 False 2 True 3 False 4 True 5 False ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No summary found (a short summary in a single line should be present at the beginning of the docstring) Errors in parameters section Parameters {'exclude', 'include'} not documented Unknown parameters {'include, exclude'} See Also section not found Examples do not pass tests ################################################################################ ################################### Doctests ################################### ################################################################################ ********************************************************************** Line 44, in pandas.DataFrame.select_dtypes Failed example: df Expected: a b c 0 0.3962 True 1.0 1 0.1459 False 2.0 2 0.2623 True 1.0 3 0.0764 False 2.0 4 -0.9703 True 1.0 5 -1.2094 False 2.0 Got: a b c 0 1.941085 True 1.0 1 1.050210 False 2.0 2 1.936395 True 1.0 3 -1.503260 False 2.0 4 -0.155825 True 1.0 5 0.852338 False 2.0 `pd.DataFrame.values` ################################################################################ ##################### Docstring (pandas.DataFrame.values) ##################### ################################################################################ Return NDFrame as ndarray or ndarray-like depending on the dtype. 
Notes ----- The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcast to int32. By numpy.find_common_type convention, mixing int64 and uint64 will result in a flot64 dtype. Examples -------- >>> df = pd.DataFrame({'a': np.random.randn(2).astype('f4'), ... 'b': [True, False], 'c': [1.0, 2.0]}) >>> type(df.values) <class 'numpy.ndarray'> >>> df.values array([[0.25209328532218933, True, 1.0], [0.35383567214012146, False, 2.0]], dtype=object) ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No extended summary found No returns section found Private classes (['NDFrame']) should not be mentioned in public docstring. 
See Also section not found Examples do not pass tests ################################################################################ ################################### Doctests ################################### ################################################################################ ********************************************************************** Line 22, in pandas.DataFrame.values Failed example: df.values Expected: array([[0.25209328532218933, True, 1.0], [0.35383567214012146, False, 2.0]], dtype=object) Got: array([[-0.8504104018211365, True, 1.0], [-0.9855750203132629, False, 2.0]], dtype=object) `pd.DataFrame.get_values` ################################################################################ ################### Docstring (pandas.DataFrame.get_values) ################### ################################################################################ Same as values (but handles sparseness conversions). Returns ------- numpy.ndaray Numpy representation of NDFrame Examples -------- >>> df = pd.DataFrame({'a': np.random.randn(2).astype('f4'), ... 'b': [True, False], 'c': [1.0, 2.0]}) >>> df.get_values() array([[0.25209328532218933, True, 1.0], [0.35383567214012146, False, 2.0]], dtype=object) ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No extended summary found Private classes (['NDFrame']) should not be mentioned in public docstring. 
See Also section not found Examples do not pass tests ################################################################################ ################################### Doctests ################################### ################################################################################ ********************************************************************** Line 13, in pandas.DataFrame.get_values Failed example: df.get_values() Expected: array([[0.25209328532218933, True, 1.0], [0.35383567214012146, False, 2.0]], dtype=object) Got: array([[-1.3661248683929443, True, 1.0], [-0.5633015632629395, False, 2.0]], dtype=object) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. -> Most of them occur because of missing extended summaries. Functions and properties I added docstrings are fairly well explained without extended summaries I think. Some of the examples are failed because I used random functions for several examples. This makes different results in each execution. Also, I omitted some outputs because of its simplicity and clearness. Lastly, I left errors already occurred in the previous version without changes.
https://api.github.com/repos/pandas-dev/pandas/pulls/20099
2018-03-10T08:31:36Z
2018-03-13T14:44:44Z
2018-03-13T14:44:44Z
2018-03-13T14:44:49Z
DOC: update the GroupBy.apply docstring
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index cb045b08f3629..4b0143b3e1ced 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -55,28 +55,28 @@ class providing the base-class of operations. _apply_docs = dict( template=""" - Apply function ``func`` group-wise and combine the results together. + Apply function `func` group-wise and combine the results together. - The function passed to ``apply`` must take a {input} as its first - argument and return a dataframe, a series or a scalar. ``apply`` will + The function passed to `apply` must take a {input} as its first + argument and return a DataFrame, Series or scalar. `apply` will then take care of combining the results back together into a single - dataframe or series. ``apply`` is therefore a highly flexible + dataframe or series. `apply` is therefore a highly flexible grouping method. - While ``apply`` is a very flexible method, its downside is that - using it can be quite a bit slower than using more specific methods. - Pandas offers a wide range of method that will be much faster - than using ``apply`` for their specific purposes, so try to use them - before reaching for ``apply``. + While `apply` is a very flexible method, its downside is that + using it can be quite a bit slower than using more specific methods + like `agg` or `transform`. Pandas offers a wide range of method that will + be much faster than using `apply` for their specific purposes, so try to + use them before reaching for `apply`. Parameters ---------- - func : function + func : callable A callable that takes a {input} as its first argument, and returns a dataframe, a series or a scalar. In addition the - callable may take positional and keyword arguments + callable may take positional and keyword arguments. args, kwargs : tuple and dict - Optional positional and keyword arguments to pass to ``func`` + Optional positional and keyword arguments to pass to `func`. 
Returns ------- @@ -84,9 +84,9 @@ class providing the base-class of operations. Notes ----- - In the current implementation ``apply`` calls func twice on the + In the current implementation `apply` calls `func` twice on the first group to decide whether it can take a fast or slow code - path. This can lead to unexpected behavior if func has + path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first group. @@ -98,38 +98,43 @@ class providing the base-class of operations. -------- pipe : Apply function to the full GroupBy object instead of to each group. - aggregate, transform + aggregate : Apply aggregate function to the GroupBy object. + transform : Apply function column-by-column to the GroupBy object. + Series.apply : Apply a function to a Series. + DataFrame.apply : Apply a function to each row or column of a DataFrame. """, dataframe_examples=""" - >>> df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1,2,3], 'C': [4,6, 5]}) + >>> df = pd.DataFrame({'A': 'a a b'.split(), + 'B': [1,2,3], + 'C': [4,6, 5]}) >>> g = df.groupby('A') - From ``df`` above we can see that ``g`` has two groups, ``a``, ``b``. - Calling ``apply`` in various ways, we can get different grouping results: + Notice that ``g`` has two groups, ``a`` and ``b``. + Calling `apply` in various ways, we can get different grouping results: - Example 1: below the function passed to ``apply`` takes a dataframe as - its argument and returns a dataframe. ``apply`` combines the result for - each group together into a new dataframe: + Example 1: below the function passed to `apply` takes a DataFrame as + its argument and returns a DataFrame. `apply` combines the result for + each group together into a new DataFrame: - >>> g.apply(lambda x: x / x.sum()) + >>> g[['B', 'C']].apply(lambda x: x / x.sum()) B C 0 0.333333 0.4 1 0.666667 0.6 2 1.000000 1.0 - Example 2: The function passed to ``apply`` takes a dataframe as - its argument and returns a series. 
``apply`` combines the result for - each group together into a new dataframe: + Example 2: The function passed to `apply` takes a DataFrame as + its argument and returns a Series. `apply` combines the result for + each group together into a new DataFrame: - >>> g.apply(lambda x: x.max() - x.min()) + >>> g[['B', 'C']].apply(lambda x: x.max() - x.min()) B C A a 1 2 b 0 0 - Example 3: The function passed to ``apply`` takes a dataframe as - its argument and returns a scalar. ``apply`` combines the result for - each group together into a series, including setting the index as + Example 3: The function passed to `apply` takes a DataFrame as + its argument and returns a scalar. `apply` combines the result for + each group together into a Series, including setting the index as appropriate: >>> g.apply(lambda x: x.C.max() - x.B.min()) @@ -139,15 +144,15 @@ class providing the base-class of operations. dtype: int64 """, series_examples=""" - >>> ser = pd.Series([0, 1, 2], index='a a b'.split()) - >>> g = ser.groupby(ser.index) + >>> s = pd.Series([0, 1, 2], index='a a b'.split()) + >>> g = s.groupby(s.index) - From ``ser`` above we can see that ``g`` has two groups, ``a``, ``b``. - Calling ``apply`` in various ways, we can get different grouping results: + From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``. + Calling `apply` in various ways, we can get different grouping results: - Example 1: The function passed to ``apply`` takes a series as - its argument and returns a series. ``apply`` combines the result for - each group together into a new series: + Example 1: The function passed to `apply` takes a Series as + its argument and returns a Series. `apply` combines the result for + each group together into a new Series: >>> g.apply(lambda x: x*2 if x.name == 'b' else x/2) 0 0.0 @@ -155,9 +160,9 @@ class providing the base-class of operations. 
2 4.0 dtype: float64 - Example 2: The function passed to ``apply`` takes a series as - its argument and returns a scalar. ``apply`` combines the result for - each group together into a series, including setting the index as + Example 2: The function passed to `apply` takes a Series as + its argument and returns a scalar. `apply` combines the result for + each group together into a Series, including setting the index as appropriate: >>> g.apply(lambda x: x.max() - x.min()) @@ -167,12 +172,12 @@ class providing the base-class of operations. """) _pipe_template = """\ -Apply a function ``func`` with arguments to this %(klass)s object and return +Apply a function `func` with arguments to this %(klass)s object and return the function's result. %(versionadded)s -Use ``.pipe`` when you want to improve readability by chaining together +Use `.pipe` when you want to improve readability by chaining together functions that expect Series, DataFrames, GroupBy or Resampler objects. Instead of writing @@ -191,17 +196,17 @@ class providing the base-class of operations. ---------- func : callable or tuple of (callable, string) Function to apply to this %(klass)s object or, alternatively, - a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a - string indicating the keyword of ``callable`` that expects the + a `(callable, data_keyword)` tuple where `data_keyword` is a + string indicating the keyword of `callable` that expects the %(klass)s object. args : iterable, optional - positional arguments passed into ``func``. + positional arguments passed into `func`. kwargs : dict, optional - a dictionary of keyword arguments passed into ``func``. + a dictionary of keyword arguments passed into `func`. Returns ------- -object : the return type of ``func``. +object : the return type of `func`. 
Notes ----- @@ -1442,7 +1447,7 @@ def nth(self, n, dropna=None): 2 3.0 2 5.0 - Specifying ``dropna`` allows count ignoring NaN + Specifying `dropna` allows count ignoring ``NaN`` >>> g.nth(0, dropna='any') B @@ -1458,7 +1463,7 @@ def nth(self, n, dropna=None): 1 NaN 2 NaN - Specifying ``as_index=False`` in ``groupby`` keeps the original index. + Specifying `as_index=False` in `groupby` keeps the original index. >>> df.groupby('A', as_index=False).nth(1) A B diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index a1511b726c705..3070fa0e63c88 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -59,7 +59,7 @@ class Grouper(object): sort : boolean, default to False whether to sort the resulting labels - additional kwargs to control time-like groupers (when ``freq`` is passed) + additional kwargs to control time-like groupers (when `freq` is passed) closed : closed end of interval; 'left' or 'right' label : interval boundary to use for labeling; 'left' or 'right'
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [X] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant - [X] closes #19337 Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################ Docstring (pandas.core.groupby.GroupBy.apply) ################ ################################################################################ Apply function ``func`` group-wise and combine the results together. The function passed to ``apply`` must take a dataframe as its first argument and return a dataframe, a series or a scalar. ``apply`` will then take care of combining the results back together into a single dataframe or series. ``apply`` is therefore a highly flexible grouping method. While ``apply`` is a very flexible method, its downside is that using it can be quite a bit slower than using more specific methods. Pandas offers a wide range of method that will be much faster than using ``apply`` for their specific purposes, so try to use them before reaching for ``apply``. Parameters ---------- func : function A callable that takes a dataframe as its first argument, and returns a dataframe, a series or a scalar. In addition the callable may take positional and keyword arguments. args : tuple Optional positional and keyword arguments to pass to ``func``. kwargs : dict Optional positional and keyword arguments to pass to ``func``. 
Returns ------- applied : Series or DataFrame Notes ----- In the current implementation ``apply`` calls func twice on the first group to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if func has side-effects, as they will take effect twice for the first group. Examples -------- >>> df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1,2,3], 'C': [4,6, 5]}) >>> g = df.groupby('A') From ``df`` above we can see that ``g`` has two groups, ``a``, ``b``. Calling ``apply`` in various ways, we can get different grouping results: Example 1: below the function passed to ``apply`` takes a dataframe as its argument and returns a dataframe. ``apply`` combines the result for each group together into a new dataframe: >>> g[['B','C']].apply(lambda x: x / x.sum()) B C 0 0.333333 0.4 1 0.666667 0.6 2 1.000000 1.0 Example 2: The function passed to ``apply`` takes a dataframe as its argument and returns a series. ``apply`` combines the result for each group together into a new dataframe: >>> g[['B','C']].apply(lambda x: x.max() - x.min()) B C A a 1 2 b 0 0 Example 3: The function passed to ``apply`` takes a dataframe as its argument and returns a scalar. ``apply`` combines the result for each group together into a series, including setting the index as appropriate: >>> g.apply(lambda x: x.C.max() - x.B.min()) A a 5 b 2 dtype: int64 See also -------- pipe : Apply function to the full GroupBy object instead of to each group. aggregate : Apply aggregate function to the GroupBy object. transform : Apply function column-by-column to the GroupBy object. ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.core.groupby.GroupBy.apply" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20098
2018-03-10T08:15:27Z
2018-07-21T17:00:32Z
2018-07-21T17:00:31Z
2018-07-21T17:00:43Z
DOC: update the DatetimeIndex.tz_convert(tz) docstring
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1109fc4effe4a..1dca7bc47c274 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1946,15 +1946,14 @@ def delete(self, loc): def tz_convert(self, tz): """ - Convert tz-aware DatetimeIndex from one time zone to another (using - pytz/dateutil) + Convert tz-aware DatetimeIndex from one time zone to another. Parameters ---------- tz : string, pytz.timezone, dateutil.tz.tzfile or None - Time zone for time. Corresponding timestamps would be converted to - time zone of the TimeSeries. - None will remove timezone holding UTC time. + Time zone for time. Corresponding timestamps would be converted + to this time zone of the DatetimeIndex. A `tz` of None will + convert to UTC and remove the timezone information. Returns ------- @@ -1964,6 +1963,50 @@ def tz_convert(self, tz): ------ TypeError If DatetimeIndex is tz-naive. + + See Also + -------- + DatetimeIndex.tz : A timezone that has a variable offset from UTC + DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a + given time zone, or remove timezone from a tz-aware DatetimeIndex. + + Examples + -------- + With the `tz` parameter, we can change the DatetimeIndex + to other time zones: + + >>> dti = pd.DatetimeIndex(start='2014-08-01 09:00', + ... freq='H', periods=3, tz='Europe/Berlin') + + >>> dti + DatetimeIndex(['2014-08-01 09:00:00+02:00', + '2014-08-01 10:00:00+02:00', + '2014-08-01 11:00:00+02:00'], + dtype='datetime64[ns, Europe/Berlin]', freq='H') + + >>> dti.tz_convert('US/Central') + DatetimeIndex(['2014-08-01 02:00:00-05:00', + '2014-08-01 03:00:00-05:00', + '2014-08-01 04:00:00-05:00'], + dtype='datetime64[ns, US/Central]', freq='H') + + With the ``tz=None``, we can remove the timezone (after converting + to UTC if necessary): + + >>> dti = pd.DatetimeIndex(start='2014-08-01 09:00',freq='H', + ... 
periods=3, tz='Europe/Berlin') + + >>> dti + DatetimeIndex(['2014-08-01 09:00:00+02:00', + '2014-08-01 10:00:00+02:00', + '2014-08-01 11:00:00+02:00'], + dtype='datetime64[ns, Europe/Berlin]', freq='H') + + >>> dti.tz_convert(None) + DatetimeIndex(['2014-08-01 07:00:00', + '2014-08-01 08:00:00', + '2014-08-01 09:00:00'], + dtype='datetime64[ns]', freq='H') """ tz = timezones.maybe_get_tz(tz)
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` (pandas_dev) root@kalih:~/pythonpanda/pandas# python -m flake8 ./pandas/core/indexes/datetimes.py (pandas_dev) root@kalih:~/pythonpanda/pandas# python scripts/validate_docstrings.py pandas.DatetimeIndex.tz_convert ################################################################################ ################# Docstring (pandas.DatetimeIndex.tz_convert) ################# ################################################################################ Convert tz-aware DatetimeIndex from one time zone to another. When using DatetimeIndex providing with timezone this method converts tz(timezone)-aware DatetimeIndex from one timezone to another. Parameters ---------- tz : string, pytz.timezone, dateutil.tz.tzfile or None Time zone for time. Corresponding timestamps would be converted to time zone of the DatetimeIndex. None will remove timezone holding UTC time. Returns ------- normalized : DatetimeIndex Raises ------ TypeError If DatetimeIndex is tz-naive. See Also -------- tz_localize : Localize tz-naive DatetimeIndex to given time zone, or remove timezone from tz-aware DatetimeIndex. Examples -------- With the `tz` parameter, we can change the DatetimeIndex to other time zones: >>> dti = pd.DatetimeIndex(start='2014-08-01 09:00', ... 
freq='H', periods=3) >>> dti DatetimeIndex(['2014-08-01 09:00:00', '2014-08-01 10:00:00', '2014-08-01 11:00:00'], dtype='datetime64[ns]', freq='H') >>> dti.tz_localize('Europe/Berlin').tz_convert('US/Eastern') DatetimeIndex(['2014-08-01 03:00:00-04:00', '2014-08-01 04:00:00-04:00', '2014-08-01 05:00:00-04:00'], dtype='datetime64[ns, US/Eastern]', freq='H') With the `None` parameter, we can remove the timezone: >>> dti = pd.DatetimeIndex(start='2014-08-01 09:00',freq='H', ... periods=3, tz='Europe/Berlin') >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='H') >>> dti.tz_convert(None) DatetimeIndex(['2014-08-01 07:00:00', '2014-08-01 08:00:00', '2014-08-01 09:00:00'], dtype='datetime64[ns]', freq='H') ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DatetimeIndex.tz_convert" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20096
2018-03-10T06:02:55Z
2018-03-13T12:45:20Z
2018-03-13T12:45:20Z
2018-03-13T12:45:31Z
DOC: Update the DatetimeIndex.normalize docstring
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1f387dadfb9ae..93f6be9365283 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1964,11 +1964,41 @@ def date(self): def normalize(self): """ - Return DatetimeIndex with times to midnight. Length is unaltered + Convert times to midnight. + + The time component of the date-timeise converted to midnight i.e. + 00:00:00. This is useful in cases, when the time does not matter. + Length is unaltered. The timezones are unaffected. + + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. Returns ------- - normalized : DatetimeIndex + DatetimeIndex or Series + The same type as the original data. Series will have the same + name and index. DatetimeIndex will have the same name. + + See Also + -------- + floor : Floor the datetimes to the specified freq. + ceil : Ceil the datetimes to the specified freq. + round : Round the datetimes to the specified freq. + + Examples + -------- + >>> idx = pd.DatetimeIndex(start='2014-08-01 10:00', freq='H', + ... periods=3, tz='Asia/Calcutta') + >>> idx + DatetimeIndex(['2014-08-01 10:00:00+05:30', + '2014-08-01 11:00:00+05:30', + '2014-08-01 12:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq='H') + >>> idx.normalize() + DatetimeIndex(['2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq=None) """ new_values = conversion.date_normalize(self.asi8, self.tz) return DatetimeIndex(new_values, freq='infer', name=self.name,
Signed-off-by: Tushar Mittal <chiragmittal.mittal@gmail.com> Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################## Docstring (pandas.DatetimeIndex.normalize) ################## ################################################################################ Return DatetimeIndex with times converted to midnight. When using DatetimeIndex, the time can be converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. If the frequency is 'H','min' or 'S' it changes to None otherwise it remains unaffected. Returns ------- normalized : DatetimeIndex See Also -------- DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq. DatetimeIndex.ceil : Ceil the DatetimeIndex to the specified freq. DatetimeIndex.round : Round the DatetimeIndex to the specified freq. Examples -------- >>> df = pd.DatetimeIndex(start='2014-08-01 10:00', freq='H', ... 
periods=3, tz='Asia/Calcutta') >>> df DatetimeIndex(['2014-08-01 10:00:00+05:30', '2014-08-01 11:00:00+05:30', '2014-08-01 12:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq='H') >>> df.normalize() DatetimeIndex(['2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None) ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DatetimeIndex.normalize" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly. Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint):
https://api.github.com/repos/pandas-dev/pandas/pulls/20094
2018-03-10T04:34:11Z
2018-03-15T14:28:12Z
2018-03-15T14:28:12Z
2018-03-15T14:28:13Z
FIX: add support for desc order when ranking infs with nans #19538
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index e83f149db1f18..2a08ca847b39d 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1022,6 +1022,7 @@ Numeric - Bug in :class:`Index` constructor with ``dtype='uint64'`` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`) - Bug in :class:`DataFrame` flex arithmetic (e.g. ``df.add(other, fill_value=foo)``) with a ``fill_value`` other than ``None`` failed to raise ``NotImplementedError`` in corner cases where either the frame or ``other`` has length zero (:issue:`19522`) - Multiplication and division of numeric-dtyped :class:`Index` objects with timedelta-like scalars returns ``TimedeltaIndex`` instead of raising ``TypeError`` (:issue:`19333`) +- Bug in :meth:`Series.rank` and :meth:`DataFrame.rank` when ``ascending='False'`` failed to return correct ranks for infinity if ``NaN`` were present (:issue:`19538`) - Bug where ``NaN`` was returned instead of 0 by :func:`Series.pct_change` and :func:`DataFrame.pct_change` when ``fill_method`` is not ``None`` (:issue:`19873`) diff --git a/pandas/_libs/algos_rank_helper.pxi.in b/pandas/_libs/algos_rank_helper.pxi.in index 9348d7525c307..b2551f3733904 100644 --- a/pandas/_libs/algos_rank_helper.pxi.in +++ b/pandas/_libs/algos_rank_helper.pxi.in @@ -135,7 +135,7 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', ascending=True, sorted_data = values.take(_as) sorted_mask = mask.take(_as) - _indices = order[1].take(_as).nonzero()[0] + _indices = np.diff(sorted_mask).nonzero()[0] non_na_idx = _indices[0] if len(_indices) > 0 else -1 argsorted = _as.astype('i8') @@ -153,7 +153,7 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', ascending=True, if (i == n - 1 or are_diff(util.get_value_at(sorted_data, i + 1), val) or - i == non_na_idx - 1): + i == non_na_idx): if tiebreak == TIEBREAK_AVERAGE: for j in range(i - dups + 1, i + 1): ranks[argsorted[j]] = sum_ranks / 
dups @@ -190,7 +190,7 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', ascending=True, count += 1.0 if (i == n - 1 or sorted_data[i + 1] != val or - i == non_na_idx - 1): + i == non_na_idx): if tiebreak == TIEBREAK_AVERAGE: for j in range(i - dups + 1, i + 1): ranks[argsorted[j]] = sum_ranks / dups diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py index d15325ca8ef0e..004e42e14cb93 100644 --- a/pandas/tests/series/test_rank.py +++ b/pandas/tests/series/test_rank.py @@ -16,6 +16,8 @@ from pandas.tests.series.common import TestData from pandas._libs.tslib import iNaT from pandas._libs.algos import Infinity, NegInfinity +from itertools import chain +import pandas.util._test_decorators as td class TestSeriesRank(TestData): @@ -257,38 +259,52 @@ def _check(s, expected, method='average'): series = s if dtype is None else s.astype(dtype) _check(series, results[method], method=method) - def test_rank_tie_methods_on_infs_nans(self): + @td.skip_if_no_scipy + @pytest.mark.parametrize('ascending', [True, False]) + @pytest.mark.parametrize('method', ['average', 'min', 'max', 'first', + 'dense']) + @pytest.mark.parametrize('na_option', ['top', 'bottom', 'keep']) + def test_rank_tie_methods_on_infs_nans(self, method, na_option, ascending): dtypes = [('object', None, Infinity(), NegInfinity()), ('float64', np.nan, np.inf, -np.inf)] chunk = 3 disabled = set([('object', 'first')]) - def _check(s, expected, method='average', na_option='keep'): - result = s.rank(method=method, na_option=na_option) + def _check(s, method, na_option, ascending): + exp_ranks = { + 'average': ([2, 2, 2], [5, 5, 5], [8, 8, 8]), + 'min': ([1, 1, 1], [4, 4, 4], [7, 7, 7]), + 'max': ([3, 3, 3], [6, 6, 6], [9, 9, 9]), + 'first': ([1, 2, 3], [4, 5, 6], [7, 8, 9]), + 'dense': ([1, 1, 1], [2, 2, 2], [3, 3, 3]) + } + ranks = exp_ranks[method] + if na_option == 'top': + order = [ranks[1], ranks[0], ranks[2]] + elif na_option == 'bottom': + order = [ranks[0], ranks[2], 
ranks[1]] + else: + order = [ranks[0], [np.nan] * chunk, ranks[1]] + expected = order if ascending else order[::-1] + expected = list(chain.from_iterable(expected)) + result = s.rank(method=method, na_option=na_option, + ascending=ascending) tm.assert_series_equal(result, Series(expected, dtype='float64')) - exp_ranks = { - 'average': ([2, 2, 2], [5, 5, 5], [8, 8, 8]), - 'min': ([1, 1, 1], [4, 4, 4], [7, 7, 7]), - 'max': ([3, 3, 3], [6, 6, 6], [9, 9, 9]), - 'first': ([1, 2, 3], [4, 5, 6], [7, 8, 9]), - 'dense': ([1, 1, 1], [2, 2, 2], [3, 3, 3]) - } - na_options = ('top', 'bottom', 'keep') for dtype, na_value, pos_inf, neg_inf in dtypes: in_arr = [neg_inf] * chunk + [na_value] * chunk + [pos_inf] * chunk iseries = Series(in_arr, dtype=dtype) - for method, na_opt in product(exp_ranks.keys(), na_options): - ranks = exp_ranks[method] - if (dtype, method) in disabled: - continue - if na_opt == 'top': - order = ranks[1] + ranks[0] + ranks[2] - elif na_opt == 'bottom': - order = ranks[0] + ranks[2] + ranks[1] - else: - order = ranks[0] + [np.nan] * chunk + ranks[1] - _check(iseries, order, method, na_opt) + if (dtype, method) in disabled: + continue + _check(iseries, method, na_option, ascending) + + def test_rank_desc_mix_nans_infs(self): + # GH 19538 + # check descending ranking when mix nans and infs + iseries = Series([1, np.nan, np.inf, -np.inf, 25]) + result = iseries.rank(ascending=False) + exp = Series([3, np.nan, 1, 4, 2], dtype='float64') + tm.assert_series_equal(result, exp) def test_rank_methods_series(self): pytest.importorskip('scipy.stats.special')
Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################ Docstring (pandas._libs.algos.rank_1d_object) ################ ################################################################################ Fast NaN-friendly version of scipy.stats.rankdata ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Summary does not end with dot No extended summary found No returns section found See Also section not found No examples section found (pandas_dev) ``` Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [x] closes #19538 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20091
2018-03-10T02:19:39Z
2018-03-30T20:35:26Z
2018-03-30T20:35:25Z
2018-04-20T06:16:17Z
DOC: update the Series.memory_usage() docstring
diff --git a/pandas/core/series.py b/pandas/core/series.py index 069f0372ab6e1..99ae07f2d006d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2696,28 +2696,54 @@ def reindex_axis(self, labels, axis=0, **kwargs): return self.reindex(index=labels, **kwargs) def memory_usage(self, index=True, deep=False): - """Memory usage of the Series + """ + Return the memory usage of the Series. + + The memory usage can optionally include the contribution of + the index and of elements of `object` dtype. Parameters ---------- - index : bool - Specifies whether to include memory usage of Series index - deep : bool - Introspect the data deeply, interrogate - `object` dtypes for system-level memory consumption + index : bool, default True + Specifies whether to include the memory usage of the Series index. + deep : bool, default False + If True, introspect the data deeply by interrogating + `object` dtypes for system-level memory consumption, and include + it in the returned value. Returns ------- - scalar bytes of memory consumed - - Notes - ----- - Memory usage does not include memory consumed by elements that - are not components of the array if deep=False + int + Bytes of memory consumed. See Also -------- - numpy.ndarray.nbytes + numpy.ndarray.nbytes : Total bytes consumed by the elements of the + array. + DataFrame.memory_usage : Bytes consumed by a DataFrame. + + Examples + -------- + + >>> s = pd.Series(range(3)) + >>> s.memory_usage() + 104 + + Not including the index gives the size of the rest of the data, which + is necessarily smaller: + + >>> s.memory_usage(index=False) + 24 + + The memory footprint of `object` values is ignored by default: + + >>> s = pd.Series(["a", "b"]) + >>> s.values + array(['a', 'b'], dtype=object) + >>> s.memory_usage() + 96 + >>> s.memory_usage(deep=True) + 212 """ v = super(Series, self).memory_usage(deep=deep) if index:
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [ ] It has been proofread on language by another sprint participant Output of the validation script: ``` ################################################################################ #################### Docstring (pandas.Series.memory_usage) #################### ################################################################################ Return the memory usage of the Series. The memory usage can optionally include the contribution of the index and of elements of `object` dtype. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the Series index. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned value. Returns ------- int Bytes of memory consumed. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of the array. 
Examples -------- >>> s = pd.Series(range(3)) >>> s.memory_usage() 104 Not including the index gives the size of the rest of the data, which is necessarily smaller: >>> s.memory_usage(index=False) 24 The memory footprint of `object` values is ignored by default: >>> class MyClass: pass >>> s = pd.Series(MyClass()) >>> s 0 <__main__.MyClass object at ...> dtype: object >>> s.memory_usage() 88 >>> s.memory_usage(deep=True) 120 ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Examples do not pass tests ################################################################################ ################################### Doctests ################################### ################################################################################ ********************************************************************** Line 43, in pandas.Series.memory_usage Failed example: s Expected: 0 <__main__.MyClass object at ...> dtype: object Got: 0 <__main__.MyClass object at 0x10f8076d8> dtype: object ``` Error due to following the recommendation about not giving non-reproducible addresses.
https://api.github.com/repos/pandas-dev/pandas/pulls/20086
2018-03-09T21:40:59Z
2018-03-10T16:32:21Z
2018-03-10T16:32:20Z
2018-03-11T14:29:35Z
DOC: update the pandas.core.resample.Resampler.backfill docstring
diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 772568ee84737..4f9c22ca98f1a 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -519,21 +519,104 @@ def nearest(self, limit=None): def backfill(self, limit=None): """ - Backward fill the values + Backward fill the new missing values in the resampled data. + + In statistics, imputation is the process of replacing missing data with + substituted values [1]_. When resampling data, missing values may + appear (e.g., when the resampling frequency is higher than the original + frequency). The backward fill will replace NaN values that appeared in + the resampled data with the next value in the original sequence. + Missing values that existed in the orginal data will not be modified. Parameters ---------- limit : integer, optional - limit of how many values to fill + Limit of how many values to fill. Returns ------- - an upsampled Series + Series, DataFrame + An upsampled Series or DataFrame with backward filled NaN values. See Also -------- - Series.fillna - DataFrame.fillna + bfill : Alias of backfill. + fillna : Fill NaN values using the specified method, which can be + 'backfill'. + nearest : Fill NaN values with nearest neighbor starting from center. + pad : Forward fill NaN values. + pandas.Series.fillna : Fill NaN values in the Series using the + specified method, which can be 'backfill'. + pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the + specified method, which can be 'backfill'. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics) + + Examples + -------- + + Resampling a Series: + + >>> s = pd.Series([1, 2, 3], + ... 
index=pd.date_range('20180101', periods=3, freq='h')) + >>> s + 2018-01-01 00:00:00 1 + 2018-01-01 01:00:00 2 + 2018-01-01 02:00:00 3 + Freq: H, dtype: int64 + + >>> s.resample('30min').backfill() + 2018-01-01 00:00:00 1 + 2018-01-01 00:30:00 2 + 2018-01-01 01:00:00 2 + 2018-01-01 01:30:00 3 + 2018-01-01 02:00:00 3 + Freq: 30T, dtype: int64 + + >>> s.resample('15min').backfill(limit=2) + 2018-01-01 00:00:00 1.0 + 2018-01-01 00:15:00 NaN + 2018-01-01 00:30:00 2.0 + 2018-01-01 00:45:00 2.0 + 2018-01-01 01:00:00 2.0 + 2018-01-01 01:15:00 NaN + 2018-01-01 01:30:00 3.0 + 2018-01-01 01:45:00 3.0 + 2018-01-01 02:00:00 3.0 + Freq: 15T, dtype: float64 + + Resampling a DataFrame that has missing values: + + >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]}, + ... index=pd.date_range('20180101', periods=3, + ... freq='h')) + >>> df + a b + 2018-01-01 00:00:00 2.0 1 + 2018-01-01 01:00:00 NaN 3 + 2018-01-01 02:00:00 6.0 5 + + >>> df.resample('30min').backfill() + a b + 2018-01-01 00:00:00 2.0 1 + 2018-01-01 00:30:00 NaN 3 + 2018-01-01 01:00:00 NaN 3 + 2018-01-01 01:30:00 6.0 5 + 2018-01-01 02:00:00 6.0 5 + + >>> df.resample('15min').backfill(limit=2) + a b + 2018-01-01 00:00:00 2.0 1.0 + 2018-01-01 00:15:00 NaN NaN + 2018-01-01 00:30:00 NaN 3.0 + 2018-01-01 00:45:00 NaN 3.0 + 2018-01-01 01:00:00 NaN 3.0 + 2018-01-01 01:15:00 NaN NaN + 2018-01-01 01:30:00 6.0 5.0 + 2018-01-01 01:45:00 6.0 5.0 + 2018-01-01 02:00:00 6.0 5.0 """ return self._upsample('backfill', limit=limit) bfill = backfill
Checklist for the pandas documentation sprint: - [x] PR title is "DOC: update the <your-function-or-method> docstring" - [x] The validation script passes: `scripts/validate_docstrings.py pandas.core.resample.Resampler.backfill` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python make.py --single pandas.core.resample.Resampler.backfill` (after the modification suggested [here](https://github.com/pandas-dev/pandas/pull/20083#issuecomment-371928588)) - [x] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ############# Docstring (pandas.core.resample.Resampler.backfill) ############# ################################################################################ Backward fill the new missing values in the resampled data. In statistics, imputation is the process of replacing missing data with substituted values [1]_. When resampling data, missing values may appear (e.g., when the resampling frequency is higher than the original frequency). The backward fill will replace NaN values that appeared in the resampled data with the next value in the original sequence. Missing values that existed in the orginal data will not be modified. Parameters ---------- limit : integer, optional Limit of how many values to fill. Returns ------- Series, DataFrame An upsampled Series or DataFrame with backward filled NaN values. See Also -------- bfill : Alias of backfill. fillna : Fill NaN values using the specified method, which can be 'backfill'. nearest : Fill NaN values with nearest neighbor starting from center. pad : Forward fill NaN values. pandas.Series.fillna : Fill NaN values in the Series using the specified method, which can be 'backfill'. 
pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the specified method, which can be 'backfill'. References ---------- .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics) Examples -------- Resampling a Series: >>> s = pd.Series([1, 2, 3], ... index=pd.date_range('20180101', periods=3, freq='h')) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 2018-01-01 02:00:00 3 Freq: H, dtype: int64 >>> s.resample('30min').backfill() 2018-01-01 00:00:00 1 2018-01-01 00:30:00 2 2018-01-01 01:00:00 2 2018-01-01 01:30:00 3 2018-01-01 02:00:00 3 Freq: 30T, dtype: int64 >>> s.resample('15min').backfill(limit=2) 2018-01-01 00:00:00 1.0 2018-01-01 00:15:00 NaN 2018-01-01 00:30:00 2.0 2018-01-01 00:45:00 2.0 2018-01-01 01:00:00 2.0 2018-01-01 01:15:00 NaN 2018-01-01 01:30:00 3.0 2018-01-01 01:45:00 3.0 2018-01-01 02:00:00 3.0 Freq: 15T, dtype: float64 Resampling a DataFrame that has missing values: >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]}, ... index=pd.date_range('20180101', periods=3, ... freq='h')) >>> df a b 2018-01-01 00:00:00 2.0 1 2018-01-01 01:00:00 NaN 3 2018-01-01 02:00:00 6.0 5 >>> df.resample('30min').backfill() a b 2018-01-01 00:00:00 2.0 1 2018-01-01 00:30:00 NaN 3 2018-01-01 01:00:00 NaN 3 2018-01-01 01:30:00 6.0 5 2018-01-01 02:00:00 6.0 5 >>> df.resample('15min').backfill(limit=2) a b 2018-01-01 00:00:00 2.0 1.0 2018-01-01 00:15:00 NaN NaN 2018-01-01 00:30:00 NaN 3.0 2018-01-01 00:45:00 NaN 3.0 2018-01-01 01:00:00 NaN 3.0 2018-01-01 01:15:00 NaN NaN 2018-01-01 01:30:00 6.0 5.0 2018-01-01 01:45:00 6.0 5.0 2018-01-01 02:00:00 6.0 5.0 ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.core.resample.Resampler.backfill" correct. :) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20083
2018-03-09T18:52:29Z
2018-03-12T12:04:41Z
2018-03-12T12:04:41Z
2018-03-12T15:05:07Z
add test cases for GroupBy.apply trivial cases
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 0561b3a1d8592..6756d25ad1707 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -233,6 +233,36 @@ def test_apply_issues(self): lambda x: x['time'][x['value'].idxmax()]) assert_series_equal(result, expected) + def test_apply_trivial(self): + # GH 20066 + # trivial apply: ignore input and return a constant dataframe. + df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'], + 'data': [1.0, 2.0, 3.0, 4.0, 5.0]}, + columns=['key', 'data']) + expected = pd.concat([df.iloc[1:], df.iloc[1:]], + axis=1, keys=['float64', 'object']) + result = df.groupby([str(x) for x in df.dtypes], + axis=1).apply(lambda x: df.iloc[1:]) + + assert_frame_equal(result, expected) + + @pytest.mark.xfail(reason=("GH 20066; function passed into apply " + "returns a DataFrame with the same index " + "as the one to create GroupBy object.")) + def test_apply_trivial_fail(self): + # GH 20066 + # trivial apply fails if the constant dataframe has the same index + # with the one used to create GroupBy object. + df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'], + 'data': [1.0, 2.0, 3.0, 4.0, 5.0]}, + columns=['key', 'data']) + expected = pd.concat([df, df], + axis=1, keys=['float64', 'object']) + result = df.groupby([str(x) for x in df.dtypes], + axis=1).apply(lambda x: df) + + assert_frame_equal(result, expected) + def test_time_field_bug(self): # Test a fix for the following error related to GH issue 11324 When # non-key fields in a group-by dataframe contained time-based fields
- [x] regarding #20066 - 2 tests added, one should pass, one expected to fail. - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/20081
2018-03-09T16:31:43Z
2018-03-13T23:21:02Z
2018-03-13T23:21:02Z
2018-03-13T23:21:20Z
DOC: Add syntax highlighting to SAS code blocks in comparison_with_sas.rst
diff --git a/doc/source/comparison_with_sas.rst b/doc/source/comparison_with_sas.rst index 214667119f7e0..0354ad473544b 100644 --- a/doc/source/comparison_with_sas.rst +++ b/doc/source/comparison_with_sas.rst @@ -25,7 +25,7 @@ As is customary, we import pandas and NumPy as follows: This is often used in interactive work (e.g. `Jupyter notebook <https://jupyter.org/>`_ or terminal) - the equivalent in SAS would be: - .. code-block:: none + .. code-block:: sas proc print data=df(obs=5); run; @@ -65,7 +65,7 @@ in the ``DATA`` step. Every ``DataFrame`` and ``Series`` has an ``Index`` - which are labels on the *rows* of the data. SAS does not have an exactly analogous concept. A data set's -row are essentially unlabeled, other than an implicit integer index that can be +rows are essentially unlabeled, other than an implicit integer index that can be accessed during the ``DATA`` step (``_N_``). In pandas, if no index is specified, an integer index is also used by default @@ -87,7 +87,7 @@ A SAS data set can be built from specified values by placing the data after a ``datalines`` statement and specifying the column names. -.. code-block:: none +.. code-block:: sas data df; input x y; @@ -121,7 +121,7 @@ will be used in many of the following examples. SAS provides ``PROC IMPORT`` to read csv data into a data set. -.. code-block:: none +.. code-block:: sas proc import datafile='tips.csv' dbms=csv out=tips replace; getnames=yes; @@ -156,7 +156,7 @@ Exporting Data The inverse of ``PROC IMPORT`` in SAS is ``PROC EXPORT`` -.. code-block:: none +.. code-block:: sas proc export data=tips outfile='tips2.csv' dbms=csv; run; @@ -178,7 +178,7 @@ Operations on Columns In the ``DATA`` step, arbitrary math expressions can be used on new or existing columns. -.. code-block:: none +.. code-block:: sas data tips; set tips; @@ -207,7 +207,7 @@ Filtering Filtering in SAS is done with an ``if`` or ``where`` statement, on one or more columns. -.. code-block:: none +.. 
code-block:: sas data tips; set tips; @@ -233,7 +233,7 @@ If/Then Logic In SAS, if/then logic can be used to create new columns. -.. code-block:: none +.. code-block:: sas data tips; set tips; @@ -262,7 +262,7 @@ Date Functionality SAS provides a variety of functions to do operations on date/datetime columns. -.. code-block:: none +.. code-block:: sas data tips; set tips; @@ -307,7 +307,7 @@ Selection of Columns SAS provides keywords in the ``DATA`` step to select, drop, and rename columns. -.. code-block:: none +.. code-block:: sas data tips; set tips; @@ -343,7 +343,7 @@ Sorting by Values Sorting in SAS is accomplished via ``PROC SORT`` -.. code-block:: none +.. code-block:: sas proc sort data=tips; by sex total_bill; @@ -369,7 +369,7 @@ SAS determines the length of a character string with the and `LENGTHC <http://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a002283942.htm>`__ functions. ``LENGTHN`` excludes trailing blanks and ``LENGTHC`` includes trailing blanks. -.. code-block:: none +.. code-block:: sas data _null_; set tips; @@ -395,7 +395,7 @@ SAS determines the position of a character in a string with the ``FINDW`` takes the string defined by the first argument and searches for the first position of the substring you supply as the second argument. -.. code-block:: none +.. code-block:: sas data _null_; set tips; @@ -419,7 +419,7 @@ Substring SAS extracts a substring from a string based on its position with the `SUBSTR <http://www2.sas.com/proceedings/sugi25/25/cc/25p088.pdf>`__ function. -.. code-block:: none +.. code-block:: sas data _null_; set tips; @@ -442,7 +442,7 @@ The SAS `SCAN <http://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/def function returns the nth word from a string. The first argument is the string you want to parse and the second argument specifies which word you want to extract. -.. code-block:: none +.. 
code-block:: sas data firstlast; input String $60.; @@ -474,7 +474,7 @@ The SAS `UPCASE <http://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/d `PROPCASE <http://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/a002598106.htm>`__ functions change the case of the argument. -.. code-block:: none +.. code-block:: sas data firstlast; input String $60.; @@ -516,7 +516,7 @@ types of joins are accomplished using the ``in=`` dummy variables to track whether a match was found in one or both input frames. -.. code-block:: none +.. code-block:: sas proc sort data=df1; by key; @@ -572,7 +572,7 @@ operations, and is ignored by default for aggregations. One difference is that missing data cannot be compared to its sentinel value. For example, in SAS you could do this to filter missing values. -.. code-block:: none +.. code-block:: sas data outer_join_nulls; set outer_join; @@ -615,7 +615,7 @@ SAS's PROC SUMMARY can be used to group by one or more key variables and compute aggregations on numeric columns. -.. code-block:: none +.. code-block:: sas proc summary data=tips nway; class sex smoker; @@ -640,7 +640,7 @@ In SAS, if the group aggregations need to be used with the original frame, it must be merged back together. For example, to subtract the mean for each observation by smoker group. -.. code-block:: none +.. code-block:: sas proc summary data=tips missing nway; class smoker; @@ -679,7 +679,7 @@ replicate most other by group processing from SAS. For example, this ``DATA`` step reads the data by sex/smoker group and filters to the first entry for each. -.. code-block:: none +.. code-block:: sas proc sort data=tips; by sex smoker; @@ -719,7 +719,7 @@ Data Interop pandas provides a :func:`read_sas` method that can read SAS data saved in the XPORT or SAS7BDAT binary format. -.. code-block:: none +.. code-block:: sas libname xportout xport 'transport-file.xpt'; data xportout.tips;
Since version 2.2, Pygments supports [SAS syntax highlighting](http://pygments.org/docs/lexers/#lexer-for-sas). This PR just changes `.. code-block:: none` to `.. code-block:: sas`. In my local build, that causes the code chunks to look like this: ![image](https://user-images.githubusercontent.com/15164633/37216851-de794a62-2389-11e8-9a17-66dc2dcd880e.png) Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20080
2018-03-09T16:07:13Z
2018-03-09T16:31:15Z
2018-03-09T16:31:15Z
2018-03-09T16:31:24Z
COMPAT: Matplotlib 2.2 compatability
diff --git a/pandas/plotting/_compat.py b/pandas/plotting/_compat.py index 0cc715eda2e18..46ebd4217862d 100644 --- a/pandas/plotting/_compat.py +++ b/pandas/plotting/_compat.py @@ -1,76 +1,31 @@ # being a bit too dynamic # pylint: disable=E1101 from __future__ import division +import operator from distutils.version import LooseVersion -def _mpl_le_1_2_1(): - try: - import matplotlib as mpl - return (LooseVersion(mpl.__version__) <= LooseVersion('1.2.1') and +def _mpl_version(version, op): + def inner(): + try: + import matplotlib as mpl + except ImportError: + return False + return (op(LooseVersion(mpl.__version__), LooseVersion(version)) and str(mpl.__version__)[0] != '0') - except ImportError: - return False + return inner -def _mpl_ge_1_3_1(): - try: - import matplotlib - # The or v[0] == '0' is because their versioneer is - # messed up on dev - return (LooseVersion(matplotlib.__version__) >= - LooseVersion('1.3.1') or - str(matplotlib.__version__)[0] == '0') - except ImportError: - return False - -def _mpl_ge_1_4_0(): - try: - import matplotlib - return (LooseVersion(matplotlib.__version__) >= LooseVersion('1.4') or - str(matplotlib.__version__)[0] == '0') - except ImportError: - return False - - -def _mpl_ge_1_5_0(): - try: - import matplotlib - return (LooseVersion(matplotlib.__version__) >= LooseVersion('1.5') or - str(matplotlib.__version__)[0] == '0') - except ImportError: - return False - - -def _mpl_ge_2_0_0(): - try: - import matplotlib - return LooseVersion(matplotlib.__version__) >= LooseVersion('2.0') - except ImportError: - return False - - -def _mpl_le_2_0_0(): - try: - import matplotlib - return matplotlib.compare_versions('2.0.0', matplotlib.__version__) - except ImportError: - return False - - -def _mpl_ge_2_0_1(): - try: - import matplotlib - return LooseVersion(matplotlib.__version__) >= LooseVersion('2.0.1') - except ImportError: - return False - - -def _mpl_ge_2_1_0(): - try: - import matplotlib - return LooseVersion(matplotlib.__version__) 
>= LooseVersion('2.1') - except ImportError: - return False +_mpl_ge_1_2_1 = _mpl_version('1.2.1', operator.ge) +_mpl_le_1_2_1 = _mpl_version('1.2.1', operator.le) +_mpl_ge_1_3_1 = _mpl_version('1.3.1', operator.ge) +_mpl_ge_1_4_0 = _mpl_version('1.4.0', operator.ge) +_mpl_ge_1_4_1 = _mpl_version('1.4.1', operator.ge) +_mpl_ge_1_5_0 = _mpl_version('1.5.0', operator.ge) +_mpl_ge_2_0_0 = _mpl_version('2.0.0', operator.ge) +_mpl_le_2_0_0 = _mpl_version('2.0.0', operator.le) +_mpl_ge_2_0_1 = _mpl_version('2.0.1', operator.ge) +_mpl_ge_2_1_0 = _mpl_version('2.1.0', operator.ge) +_mpl_ge_2_2_0 = _mpl_version('2.2.0', operator.ge) diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index 2e62b22b2b69e..f65791329f2f1 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -56,6 +56,7 @@ def setup_method(self, method): self.mpl_ge_1_5_0 = plotting._compat._mpl_ge_1_5_0() self.mpl_ge_2_0_0 = plotting._compat._mpl_ge_2_0_0() self.mpl_ge_2_0_1 = plotting._compat._mpl_ge_2_0_1() + self.mpl_ge_2_2_0 = plotting._compat._mpl_ge_2_2_0() if self.mpl_ge_1_4_0: self.bp_n_objects = 7 diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 2f2931c9c86ac..e3d502cd373e4 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -8,7 +8,7 @@ import numpy as np from pandas import Index, Series, DataFrame, NaT -from pandas.compat import is_platform_mac, PY3 +from pandas.compat import PY3 from pandas.core.indexes.datetimes import date_range, bdate_range from pandas.core.indexes.timedeltas import timedelta_range from pandas.tseries.offsets import DateOffset @@ -1357,13 +1357,13 @@ def test_plot_outofbounds_datetime(self): values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)] ax.plot(values) - @td.xfail_if_mpl_2_2 - @pytest.mark.skip( - is_platform_mac(), - "skip on mac for precision display issue on older mpl") def 
test_format_timedelta_ticks_narrow(self): - if self.mpl_ge_2_0_0: + if self.mpl_ge_2_2_0: + expected_labels = (['-1 days 23:59:59.999999998'] + + ['00:00:00.0000000{:0>2d}'.format(2 * i) + for i in range(6)]) + elif self.mpl_ge_2_0_0: expected_labels = [''] + [ '00:00:00.00000000{:d}'.format(2 * i) for i in range(5)] + [''] @@ -1382,10 +1382,6 @@ def test_format_timedelta_ticks_narrow(self): for l, l_expected in zip(labels, expected_labels): assert l.get_text() == l_expected - @td.xfail_if_mpl_2_2 - @pytest.mark.skip( - is_platform_mac(), - "skip on mac for precision display issue on older mpl") def test_format_timedelta_ticks_wide(self): if self.mpl_ge_2_0_0: @@ -1402,6 +1398,9 @@ def test_format_timedelta_ticks_wide(self): '9 days 06:13:20', '' ] + if self.mpl_ge_2_2_0: + expected_labels[0] = '-2 days 20:13:20' + expected_labels[-1] = '10 days 10:00:00' else: expected_labels = [ '00:00:00',
Summary of changes - clean up version detection in compat - Update timedelta tick formatting tests. Matplotlib fixed tick formatting of timedeltas so they're consistent with non-timedelta formatting. closes #20031
https://api.github.com/repos/pandas-dev/pandas/pulls/20079
2018-03-09T14:48:03Z
2018-03-13T12:03:22Z
2018-03-13T12:03:22Z
2018-03-13T12:03:43Z
DOC: fix the pandas.DataFrame.add example
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 6c6a54993b669..037c9e31f7157 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -345,7 +345,7 @@ def _get_op_name(op, special): _add_example_FRAME = """ >>> a = pd.DataFrame([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'], - columns=['one']) +... columns=['one']) >>> a one a 1.0 @@ -353,8 +353,8 @@ def _get_op_name(op, special): c 1.0 d NaN >>> b = pd.DataFrame(dict(one=[1, np.nan, 1, np.nan], - two=[np.nan, 2, np.nan, 2]), - index=['a', 'b', 'd', 'e']) +... two=[np.nan, 2, np.nan, 2]), +... index=['a', 'b', 'd', 'e']) >>> b one two a 1.0 NaN @@ -520,7 +520,7 @@ def _get_op_name(op, special): Examples -------- >>> a = pd.DataFrame([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'], - columns=['one']) +... columns=['one']) >>> a one a 1.0 @@ -528,8 +528,8 @@ def _get_op_name(op, special): c 1.0 d NaN >>> b = pd.DataFrame(dict(one=[1, np.nan, 1, np.nan], - two=[np.nan, 2, np.nan, 2]), - index=['a', 'b', 'd', 'e']) +... two=[np.nan, 2, np.nan, 2]), +... index=['a', 'b', 'd', 'e']) >>> b one two a 1.0 NaN @@ -556,14 +556,14 @@ def _get_op_name(op, special): other : Series, DataFrame, or constant axis : {{0, 1, 'index', 'columns'}} For Series input, axis to match Series index on +level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level fill_value : None or float value, default None Fill existing missing (NaN) values, and any new element needed for successful DataFrame alignment, with this value before computation. If data in both corresponding DataFrame locations is missing the result will be missing -level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level Notes -----
Docstring assigned to the PyData Nairobi chapter for the sprint Checklist for the pandas documentation sprint - [x] PR title is "DOC: update the pandas.DataFrame.rmul docstring" - [x] The validation script passes: `scripts/validate_docstrings.py pandas.DataFrame.rmul` - [x] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] The html version looks good: `python doc/make.py --single pandas.DataFrame.rmul` - [ ] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: `scripts/validate_docstrings.py pandas.DataFrame.rmul` ``` ################################################################################ ###################### Docstring (pandas.DataFrame.rmul) ###################### ################################################################################ Multiplication of dataframe and other, element-wise (binary operator `rmul`). Equivalent to ``other * dataframe``, but with support to substitute a fill_value for missing data in one of the inputs. Parameters ---------- other : Series, DataFrame, or constant axis : {0, 1, 'index', 'columns'} For Series input, axis to match Series index on level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level fill_value : None or float value, default None Fill existing missing (NaN) values, and any new element needed for successful DataFrame alignment, with this value before computation. If data in both corresponding DataFrame locations is missing the result will be missing Notes ----- Mismatched indices will be unioned together Returns ------- result : DataFrame Examples -------- >>> a = pd.DataFrame([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'], ... columns=['one']) >>> a one a 1.0 b 1.0 c 1.0 d NaN >>> b = pd.DataFrame(dict(one=[1, np.nan, 1, np.nan], ... two=[np.nan, 2, np.nan, 2]), ... 
index=['a', 'b', 'd', 'e']) >>> b one two a 1.0 NaN b NaN 2.0 d 1.0 NaN e NaN 2.0 >>> a.add(b, fill_value=0) one two a 2.0 NaN b 1.0 2.0 c 1.0 NaN d 1.0 NaN e NaN 2.0 See also -------- DataFrame.mul ################################################################################ ################################## Validation ################################## ################################################################################ Docstring for "pandas.DataFrame.rmul" correct. :) ``` If the validation script still gives errors, but you think there is a good reason to deviate in this case (and there are certainly such cases), please state this explicitly.
https://api.github.com/repos/pandas-dev/pandas/pulls/20077
2018-03-09T14:42:12Z
2018-03-15T13:48:21Z
2018-03-15T13:48:21Z
2018-03-19T21:03:01Z
DOC: Improved the docstring of errors.ParserWarning
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index af4e83f506257..ee1a2ba777559 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -53,10 +53,42 @@ class EmptyDataError(ValueError): class ParserWarning(Warning): """ - Warning that is raised in `pd.read_csv` whenever it is necessary - to change parsers (generally from 'c' to 'python') contrary to the - one specified by the user due to lack of support or functionality for - parsing particular attributes of a CSV file with the requested engine. + Warning raised when reading a file that doesn't use the default 'c' parser. + + Raised by `pd.read_csv` and `pd.read_table` when it is necessary to change + parsers, generally from the default 'c' parser to 'python'. + + It happens due to a lack of support or functionality for parsing a + particular attribute of a CSV file with the requested engine. + + Currently, 'c' unsupported options include the following parameters: + + 1. `sep` other than a single character (e.g. regex separators) + 2. `skipfooter` higher than 0 + 3. `sep=None` with `delim_whitespace=False` + + The warning can be avoided by adding `engine='python'` as a parameter in + `pd.read_csv` and `pd.read_table` methods. + + See Also + -------- + pd.read_csv : Read CSV (comma-separated) file into DataFrame. + pd.read_table : Read general delimited file into DataFrame. + + Examples + -------- + Using a `sep` in `pd.read_csv` other than a single character: + + >>> import io + >>> csv = u'''a;b;c + ... 1;1,8 + ... 1;2,1''' + >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]') + ... # ParserWarning: Falling back to the 'python' engine... + + Adding `engine='python'` to `pd.read_csv` removes the Warning: + + >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]', engine='python') """
Checklist for the pandas documentation sprint (ignore this if you are doing an unrelated PR): - [X] PR title is "DOC: update the <your-function-or-method> docstring" - [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` - [X] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] The html version looks good: `python doc/make.py --single <your-function-or-method>` - [X] It has been proofread on language by another sprint participant Please include the output of the validation script below between the "```" ticks: ``` ################################################################################ ################### Docstring (pandas.errors.ParserWarning) ################### ################################################################################ Warning raised when reading a file that doesn't use the default parser. Thrown by `pd.read_csv` and `pd.read_table` when it is necessary to change parsers, generally from 'c' to 'python'. It happens due to lack of support or functionality for parsing particular attributes of a CSV file with the requested engine. Currently, C-unsupported options include the following parameters: 1. `sep` other than a single character (e.g. regex separators) 2. `skipfooter` higher than 0 3. `sep=None` with `delim_whitespace=False` The warning can be avoided by adding `engine='python'` as a parameter in `pd.read_csv` and `pd.read_table` methods. See Also -------- pd.read_csv : Read CSV (comma-separated) file into DataFrame. pd.read_table : Read general delimited file into DataFrame. Examples -------- Using a `sep` in `pd.read_csv` other than a single character: >>> import io >>> csv = u'''a;b;c ... 1;1,8 ... 1;2,1''' >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]') Traceback (most recent call last): ... ParserWarning: Falling back to the 'python' engine... 
Adding `engine='python'` to `pd.read_csv` removes the Warning: >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]', engine='python') scripts/validate_docstrings.py:1: ParserWarning: Falling back to the 'python' engine because the 'c' engine does not support regex separators (separators > 1 char and different from '\s+' are interpreted as regex); you can avoid this warning by specifying engine='python'. #!/usr/bin/env python ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: No returns section found Examples do not pass tests ################################################################################ ################################### Doctests ################################### ################################################################################ ********************************************************************** Line 32, in pandas.errors.ParserWarning Failed example: df = pd.read_csv(io.StringIO(csv), sep='[;,]') Expected: Traceback (most recent call last): ... ParserWarning: Falling back to the 'python' engine... Got nothing ``` I am documenting a Warning and I could not find a better way to display the warning in the html example other than using a "Traceback (most recent call last):" followed by "ParserWarning: Falling back to the 'python' engine..." in the docstring. It also says that it found errors about "No returns sections found". On what I understood this is not relevant to the docstring in hand.
https://api.github.com/repos/pandas-dev/pandas/pulls/20076
2018-03-09T14:17:11Z
2018-03-15T19:29:23Z
2018-03-15T19:29:23Z
2018-03-15T19:29:23Z
DOC/CLN: clean-up shared_docs in generic.py
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b4e8b4e3a6bec..15cebb88faea7 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3629,7 +3629,8 @@ def align(self, other, join='outer', axis=None, level=None, copy=True, fill_axis=fill_axis, broadcast_axis=broadcast_axis) - @Appender(_shared_docs['reindex'] % _shared_doc_kwargs) + @Substitution(**_shared_doc_kwargs) + @Appender(NDFrame.reindex.__doc__) @rewrite_axis_style_signature('labels', [('method', None), ('copy', True), ('level', None), @@ -4479,7 +4480,8 @@ def f(vals): # ---------------------------------------------------------------------- # Sorting - @Appender(_shared_docs['sort_values'] % _shared_doc_kwargs) + @Substitution(**_shared_doc_kwargs) + @Appender(NDFrame.sort_values.__doc__) def sort_values(self, by, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): inplace = validate_bool_kwarg(inplace, 'inplace') @@ -4521,7 +4523,8 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False, else: return self._constructor(new_data).__finalize__(self) - @Appender(_shared_docs['sort_index'] % _shared_doc_kwargs) + @Substitution(**_shared_doc_kwargs) + @Appender(NDFrame.sort_index.__doc__) def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True, by=None): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 393e7caae5fab..8fed92f7ed6b9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -643,7 +643,8 @@ def _set_axis(self, axis, labels): self._data.set_axis(axis, labels) self._clear_item_cache() - _shared_docs['transpose'] = """ + def transpose(self, *args, **kwargs): + """ Permute the dimensions of the %(klass)s Parameters @@ -663,9 +664,6 @@ def _set_axis(self, axis, labels): y : same as input """ - @Appender(_shared_docs['transpose'] % _shared_doc_kwargs) - def transpose(self, *args, **kwargs): - # construct the args axes, kwargs = 
self._construct_axes_from_arguments(args, kwargs, require_all=True) @@ -965,9 +963,8 @@ def swaplevel(self, i=-2, j=-1, axis=0): # ---------------------------------------------------------------------- # Rename - # TODO: define separate funcs for DataFrame, Series and Panel so you can - # get completion on keyword arguments. - _shared_docs['rename'] = """ + def rename(self, *args, **kwargs): + """ Alter axes input function or functions. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change @@ -975,13 +972,11 @@ def swaplevel(self, i=-2, j=-1, axis=0): Parameters ---------- - %(optional_mapper)s %(axes)s : scalar, list-like, dict-like or function, optional Scalar or list-like will alter the ``Series.name`` attribute, and raise on DataFrame or Panel. dict-like or functions are transformations to apply to that axis' values - %(optional_axis)s copy : boolean, default True Also copy underlying data inplace : boolean, default False @@ -1069,12 +1064,6 @@ def swaplevel(self, i=-2, j=-1, axis=0): See the :ref:`user guide <basics.rename>` for more. """ - - @Appender(_shared_docs['rename'] % dict(axes='axes keywords for this' - ' object', klass='NDFrame', - optional_mapper='', - optional_axis='')) - def rename(self, *args, **kwargs): axes, kwargs = self._construct_axes_from_arguments(args, kwargs) copy = kwargs.pop('copy', True) inplace = kwargs.pop('inplace', False) @@ -1127,8 +1116,6 @@ def f(x): else: return result.__finalize__(self) - rename.__doc__ = _shared_docs['rename'] - def rename_axis(self, mapper, axis=0, copy=True, inplace=False): """ Alter the name of the index or columns. @@ -3024,7 +3011,8 @@ def __delitem__(self, key): except KeyError: pass - _shared_docs['_take'] = """ + def _take(self, indices, axis=0, is_copy=True): + """ Return the elements in the given *positional* indices along an axis. 
This means that we are not indexing according to actual values in @@ -3055,9 +3043,6 @@ def __delitem__(self, key): numpy.ndarray.take numpy.take """ - - @Appender(_shared_docs['_take']) - def _take(self, indices, axis=0, is_copy=True): self._consolidate_inplace() new_data = self._data.take(indices, @@ -3072,7 +3057,8 @@ def _take(self, indices, axis=0, is_copy=True): return result - _shared_docs['take'] = """ + def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs): + """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in @@ -3155,9 +3141,6 @@ class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ - - @Appender(_shared_docs['take']) - def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs): if convert is not None: msg = ("The 'convert' parameter is deprecated " "and will be removed in a future version.") @@ -3580,7 +3563,9 @@ def add_suffix(self, suffix): mapper = {self._info_axis_name: f} return self.rename(**mapper) - _shared_docs['sort_values'] = """ + def sort_values(self, by=None, axis=0, ascending=True, inplace=False, + kind='quicksort', na_position='last'): + """ Sort by the values along either axis Parameters @@ -3665,17 +3650,12 @@ def add_suffix(self, suffix): 0 A 2 0 1 A 1 1 """ - - def sort_values(self, by=None, axis=0, ascending=True, inplace=False, - kind='quicksort', na_position='last'): - """ - NOT IMPLEMENTED: do not call this method, as sorting values is not - supported for Panel objects and will raise an error. 
- """ raise NotImplementedError("sort_values has not been implemented " "on Panel or Panel4D objects.") - _shared_docs['sort_index'] = """ + def sort_index(self, axis=0, level=None, ascending=True, inplace=False, + kind='quicksort', na_position='last', sort_remaining=True): + """ Sort object by labels (along an axis) Parameters @@ -3703,10 +3683,6 @@ def sort_values(self, by=None, axis=0, ascending=True, inplace=False, ------- sorted_obj : %(klass)s """ - - @Appender(_shared_docs['sort_index'] % dict(axes="axes", klass="NDFrame")) - def sort_index(self, axis=0, level=None, ascending=True, inplace=False, - kind='quicksort', na_position='last', sort_remaining=True): inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) @@ -3724,7 +3700,8 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, new_axis = labels.take(sort_index) return self.reindex(**{axis_name: new_axis}) - _shared_docs['reindex'] = """ + def reindex(self, *args, **kwargs): + """ Conform %(klass)s to new index with optional filling logic, placing NA/NaN in locations having no value in the previous index. 
A new object is produced unless the new index is equivalent to the current one and @@ -3920,14 +3897,8 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, ------- reindexed : %(klass)s """ - - # TODO: Decide if we care about having different examples for different - # kinds - - @Appender(_shared_docs['reindex'] % dict(axes="axes", klass="NDFrame", - optional_labels="", - optional_axis="")) - def reindex(self, *args, **kwargs): + # TODO: Decide if we care about having different examples for different + # kinds # construct the args axes, kwargs = self._construct_axes_from_arguments(args, kwargs) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 81d1e83ee6870..1e2d4000413bb 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1215,7 +1215,8 @@ def _wrap_result(self, result, axis): return self._construct_return_type(result, axes) - @Appender(_shared_docs['reindex'] % _shared_doc_kwargs) + @Substitution(**_shared_doc_kwargs) + @Appender(NDFrame.reindex.__doc__) def reindex(self, *args, **kwargs): major = kwargs.pop("major", None) minor = kwargs.pop('minor', None) @@ -1236,7 +1237,8 @@ def reindex(self, *args, **kwargs): kwargs.pop('labels', None) return super(Panel, self).reindex(**kwargs) - @Appender(_shared_docs['rename'] % _shared_doc_kwargs) + @Substitution(**_shared_doc_kwargs) + @Appender(NDFrame.rename.__doc__) def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs): major_axis = (major_axis if major_axis is not None else kwargs.pop('major', None)) @@ -1253,7 +1255,8 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, copy=copy, limit=limit, fill_value=fill_value) - @Appender(_shared_docs['transpose'] % _shared_doc_kwargs) + @Substitution(**_shared_doc_kwargs) + @Appender(NDFrame.transpose.__doc__) def transpose(self, *args, **kwargs): # check if a list of axes was passed in instead as a # single *args element @@ -1536,6 +1539,13 @@ def _extract_axis(self, data, axis=0, 
intersect=False): return ensure_index(index) + def sort_values(self, *args, **kwargs): + """ + NOT IMPLEMENTED: do not call this method, as sorting values is not + supported for Panel objects and will raise an error. + """ + super(Panel, self).sort_values(*args, **kwargs) + Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0, stat_axis=1, aliases={'major': 'major_axis', diff --git a/pandas/core/series.py b/pandas/core/series.py index 83f80c305c5eb..82198c2b3edd5 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3496,7 +3496,8 @@ def rename(self, index=None, **kwargs): return self._set_name(index, inplace=kwargs.get('inplace')) return super(Series, self).rename(index=index, **kwargs) - @Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs) + @Substitution(**_shared_doc_kwargs) + @Appender(generic.NDFrame.reindex.__doc__) def reindex(self, index=None, **kwargs): return super(Series, self).reindex(index=index, **kwargs) @@ -3680,7 +3681,7 @@ def memory_usage(self, index=True, deep=False): v += self.index.memory_usage(deep=deep) return v - @Appender(generic._shared_docs['_take']) + @Appender(generic.NDFrame._take.__doc__) def _take(self, indices, axis=0, is_copy=False): indices = ensure_platform_int(indices) diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 8ac5d81f23bb2..97cd3a0a1fb6a 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -19,7 +19,7 @@ import pandas.core.indexes.base as ibase import pandas.core.ops as ops import pandas._libs.index as libindex -from pandas.util._decorators import Appender +from pandas.util._decorators import Appender, Substitution from pandas.core.sparse.array import ( make_sparse, SparseArray, @@ -563,7 +563,8 @@ def copy(self, deep=True): return self._constructor(new_data, sparse_index=self.sp_index, fill_value=self.fill_value).__finalize__(self) - @Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs) + 
@Substitution(**_shared_doc_kwargs) + @Appender(generic.NDFrame.reindex.__doc__) def reindex(self, index=None, method=None, copy=True, limit=None, **kwargs): @@ -592,7 +593,7 @@ def sparse_reindex(self, new_index): sparse_index=new_index, fill_value=self.fill_value).__finalize__(self) - @Appender(generic._shared_docs['take']) + @Appender(generic.NDFrame.take.__doc__) def take(self, indices, axis=0, convert=None, *args, **kwargs): if convert is not None: msg = ("The 'convert' parameter is deprecated "
xref https://github.com/pandas-dev/pandas/pull/20016
https://api.github.com/repos/pandas-dev/pandas/pulls/20074
2018-03-09T11:31:57Z
2018-10-02T20:31:37Z
2018-10-02T20:31:37Z
2018-10-02T20:31:41Z
Don't raise exceptions splitting a blank string
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 791365295c268..cb06d4e197859 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -839,6 +839,7 @@ Categorical ``self`` but in a different order (:issue:`19551`) - Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`) - Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, :issue:`18593`) +- Bug in :meth:`Series.str.split` with ``expand=True`` incorrectly raising an IndexError on empty strings (:issue:`20002`). - Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) - Bug in :class:`Series` constructor with scalar and ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19565`) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index cb55108e9d05a..75ff1ba9d5a5e 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1633,7 +1633,8 @@ def cons_row(x): if result: # propagate nan values to match longest sequence (GH 18450) max_len = max(len(x) for x in result) - result = [x * max_len if x[0] is np.nan else x for x in result] + result = [x * max_len if len(x) == 0 or x[0] is np.nan + else x for x in result] if not isinstance(expand, bool): raise ValueError("expand must be True or False") diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index a878d6ed7b052..aa94b992facfc 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -1992,6 +1992,19 @@ def test_rsplit(self): exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']]) tm.assert_series_equal(result, exp) + def test_split_blank_string(self): + # expand blank split GH 20067 + values = Series([''], name='test') 
+ result = values.str.split(expand=True) + exp = DataFrame([[]]) + tm.assert_frame_equal(result, exp) + + values = Series(['a b c', 'a b', '', ' '], name='test') + result = values.str.split(expand=True) + exp = DataFrame([['a', 'b', 'c'], ['a', 'b', np.nan], + [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan]]) + tm.assert_frame_equal(result, exp) + def test_split_noargs(self): # #1859 s = Series(['Wes McKinney', 'Travis Oliphant'])
whatsnew: bug fix on reshaping - [X] closes #20002 - [x] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/20067
2018-03-09T03:15:03Z
2018-03-17T19:42:53Z
2018-03-17T19:42:53Z
2018-03-17T19:42:59Z
DOC: Improved the docstring of pandas.DataFrame.values
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a893b2ba1a189..9f2112729a503 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4232,7 +4232,55 @@ def as_matrix(self, columns=None): @property def values(self): - """Numpy representation of NDFrame + """ + Return a Numpy representation of the DataFrame. + + Only the values in the DataFrame will be returned, the axes labels + will be removed. + + Returns + ------- + numpy.ndarray + The values of the DataFrame. + + Examples + -------- + A DataFrame where all columns are the same type (e.g., int64) results + in an array of the same type. + + >>> df = pd.DataFrame({'age': [ 3, 29], + ... 'height': [94, 170], + ... 'weight': [31, 115]}) + >>> df + age height weight + 0 3 94 31 + 1 29 170 115 + >>> df.dtypes + age int64 + height int64 + weight int64 + dtype: object + >>> df.values + array([[ 3, 94, 31], + [ 29, 170, 115]], dtype=int64) + + A DataFrame with mixed type columns(e.g., str/object, int64, float32) + results in an ndarray of the broadest type that accommodates these + mixed types (e.g., object). + + >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'), + ... ('lion', 80.5, 1), + ... ('monkey', np.nan, None)], + ... columns=('name', 'max_speed', 'rank')) + >>> df2.dtypes + name object + max_speed float64 + rank object + dtype: object + >>> df2.values + array([['parrot', 24.0, 'second'], + ['lion', 80.5, 1], + ['monkey', nan, None]], dtype=object) Notes ----- @@ -4243,8 +4291,13 @@ def values(self): e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcast to - int32. By numpy.find_common_type convention, mixing int64 and uint64 - will result in a flot64 dtype. + int32. By :func:`numpy.find_common_type` convention, mixing int64 + and uint64 will result in a float64 dtype. 
+ + See Also + -------- + pandas.DataFrame.index : Retrievie the index labels + pandas.DataFrame.columns : Retrieving the column names """ self._consolidate_inplace() return self._data.as_array(transpose=self._AXIS_REVERSED)
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry ``` ################################################################################ ############ Docstring (pandas.pandas.core.generic.NDFrame.values) ############ ################################################################################ Generate and return a Numpy representation of NDFrame. Only the values in the NDFrame will be returned, the axes labels will be removed. Returns ------- numpy.ndarray The values of the NDFrame Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> df name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.values array([['falcon', 'bird', 389.0], ['parrot', 'bird', 24.0], ['lion', 'mammal', 80.5], ['monkey', 'mammal', nan]], dtype=object) Notes ----- The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcast to int32. By numpy.find_common_type convention, mixing int64 and uint64 will result in a flot64 dtype. ################################################################################ ################################## Validation ################################## ################################################################################ Errors found: Private classes (['NDFrame']) should not be mentioned in public docstring. See Also section not found ```
https://api.github.com/repos/pandas-dev/pandas/pulls/20065
2018-03-09T01:03:14Z
2018-03-10T14:41:17Z
2018-03-10T14:41:17Z
2018-03-10T17:17:28Z
Docstring for pd.core.window.Expanding.kurt
diff --git a/pandas/core/window.py b/pandas/core/window.py index c41b07759d555..5294cdfd5662d 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -926,28 +926,7 @@ def skew(self, **kwargs): Notes ----- - A minimum of 4 periods is required for the rolling calculation. - - Examples - -------- - The below example will show a rolling calculation with a window size of - four matching the equivalent function call using `scipy.stats`. - - >>> arr = [1, 2, 3, 4, 999] - >>> fmt = "{0:.6f}" # limit the printed precision to 6 digits - >>> import scipy.stats - >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False))) - -1.200000 - >>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False))) - 3.999946 - >>> s = pd.Series(arr) - >>> s.rolling(4).kurt() - 0 NaN - 1 NaN - 2 NaN - 3 -1.200000 - 4 3.999946 - dtype: float64 + A minimum of 4 periods is required for the %(name)s calculation. """) def kurt(self, **kwargs): @@ -1269,6 +1248,31 @@ def var(self, ddof=1, *args, **kwargs): def skew(self, **kwargs): return super(Rolling, self).skew(**kwargs) + _agg_doc = dedent(""" + Examples + -------- + + The example below will show a rolling calculation with a window size of + four matching the equivalent function call using `scipy.stats`. 
+ + >>> arr = [1, 2, 3, 4, 999] + >>> fmt = "{0:.6f}" # limit the printed precision to 6 digits + >>> import scipy.stats + >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False))) + -1.200000 + >>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False))) + 3.999946 + >>> s = pd.Series(arr) + >>> s.rolling(4).kurt() + 0 NaN + 1 NaN + 2 NaN + 3 -1.200000 + 4 3.999946 + dtype: float64 + """) + + @Appender(_agg_doc) @Substitution(name='rolling') @Appender(_shared_docs['kurt']) def kurt(self, **kwargs): @@ -1508,6 +1512,31 @@ def var(self, ddof=1, *args, **kwargs): def skew(self, **kwargs): return super(Expanding, self).skew(**kwargs) + _agg_doc = dedent(""" + Examples + -------- + + The example below will show an expanding calculation with a window size of + four matching the equivalent function call using `scipy.stats`. + + >>> arr = [1, 2, 3, 4, 999] + >>> import scipy.stats + >>> fmt = "{0:.6f}" # limit the printed precision to 6 digits + >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False))) + -1.200000 + >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False))) + 4.999874 + >>> s = pd.Series(arr) + >>> s.expanding(4).kurt() + 0 NaN + 1 NaN + 2 NaN + 3 -1.200000 + 4 4.999874 + dtype: float64 + """) + + @Appender(_agg_doc) @Substitution(name='expanding') @Appender(_shared_docs['kurt']) def kurt(self, **kwargs):
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Notice that I made two small changes to the ``Rolling.kurt`` (by @WillAyd) docstring too. Namely: - since window methods have ``kurt`` but not ``kurtosis`` (while e.g. ``Series`` has both), I guess it is best to tell users that ``kurt`` is the default in pandas - that is, use it in "See also" - it is true that a ``DataFrame`` can be constructed directly from a list, but this is not the standard use, so I replaced with ``Series``
https://api.github.com/repos/pandas-dev/pandas/pulls/20064
2018-03-09T01:01:56Z
2018-03-11T12:07:02Z
2018-03-11T12:07:02Z
2018-03-11T12:07:02Z
Added flake8 to DEV requirements
diff --git a/ci/environment-dev.yaml b/ci/environment-dev.yaml index c72abd0c19516..1337fc54e9aac 100644 --- a/ci/environment-dev.yaml +++ b/ci/environment-dev.yaml @@ -5,6 +5,7 @@ channels: dependencies: - Cython - NumPy + - flake8 - moto - pytest>=3.1 - python-dateutil>=2.5.0 diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt index 82f8de277c57b..fcbe0da5de305 100644 --- a/ci/requirements_dev.txt +++ b/ci/requirements_dev.txt @@ -2,9 +2,10 @@ # Do not modify directly Cython NumPy +flake8 moto pytest>=3.1 python-dateutil>=2.5.0 pytz setuptools>=3.3 -sphinx +sphinx \ No newline at end of file
In advance of the doc sprint this Saturday it looks like some users were confused by `git diff upstream/master -u -- "*.py" | flake8 --diff` failing even though they thought they had installed all development dependencies. Updating the .yml file to include this
https://api.github.com/repos/pandas-dev/pandas/pulls/20063
2018-03-08T23:28:33Z
2018-03-09T00:36:24Z
2018-03-09T00:36:24Z
2018-03-09T00:37:02Z
BUG: Index.difference of itself doesn't preserve type
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index c08e22af295f4..b42aaac3cef96 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -939,6 +939,7 @@ Indexing - Bug in :class:`IntervalIndex` where set operations that returned an empty ``IntervalIndex`` had the wrong dtype (:issue:`19101`) - Bug in :meth:`DataFrame.drop_duplicates` where no ``KeyError`` is raised when passing in columns that don't exist on the ``DataFrame`` (issue:`19726`) - Bug in ``Index`` subclasses constructors that ignore unexpected keyword arguments (:issue:`19348`) +- Bug in :meth:`Index.difference` when taking difference of an ``Index`` with itself (:issue:`20040`) MultiIndex diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index d5daece62cba8..6cd0b5d7697d2 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -457,7 +457,7 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs): Must be careful not to recurse. 
""" if not hasattr(values, 'dtype'): - if values is None and dtype is not None: + if (values is None or not len(values)) and dtype is not None: values = np.empty(0, dtype=dtype) else: values = np.array(values, copy=False) @@ -491,6 +491,8 @@ def _shallow_copy(self, values=None, **kwargs): values = self.values attributes = self._get_attributes_dict() attributes.update(kwargs) + if not len(values) and 'dtype' not in kwargs: + attributes['dtype'] = self.dtype return self._simple_new(values, **attributes) def _shallow_copy_with_infer(self, values=None, **kwargs): @@ -511,6 +513,8 @@ def _shallow_copy_with_infer(self, values=None, **kwargs): attributes = self._get_attributes_dict() attributes.update(kwargs) attributes['copy'] = False + if not len(values) and 'dtype' not in kwargs: + attributes['dtype'] = self.dtype if self._infer_as_myclass: try: return self._constructor(values, **attributes) @@ -2815,7 +2819,7 @@ def difference(self, other): self._assert_can_do_setop(other) if self.equals(other): - return Index([], name=self.name) + return self._shallow_copy([]) other, result_name = self._convert_can_do_setop(other) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 60eda70714da5..8226c4bcac494 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2755,7 +2755,7 @@ def intersection(self, other): other_tuples = other._ndarray_values uniq_tuples = sorted(set(self_tuples) & set(other_tuples)) if len(uniq_tuples) == 0: - return MultiIndex(levels=[[]] * self.nlevels, + return MultiIndex(levels=self.levels, labels=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: @@ -2777,7 +2777,7 @@ def difference(self, other): return self if self.equals(other): - return MultiIndex(levels=[[]] * self.nlevels, + return MultiIndex(levels=self.levels, labels=[[]] * self.nlevels, names=result_names, verify_integrity=False) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 
e8f05cb928cad..603fa254d5ca6 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -20,7 +20,7 @@ from pandas import (period_range, date_range, Series, DataFrame, Float64Index, Int64Index, UInt64Index, CategoricalIndex, DatetimeIndex, TimedeltaIndex, - PeriodIndex, isna) + PeriodIndex, RangeIndex, isna) from pandas.core.index import _get_combined_index, _ensure_index_from_sequences from pandas.util.testing import assert_almost_equal from pandas.compat.numpy import np_datetime64_compat @@ -44,7 +44,7 @@ def setup_method(self, method): tdIndex=tm.makeTimedeltaIndex(100), intIndex=tm.makeIntIndex(100), uintIndex=tm.makeUIntIndex(100), - rangeIndex=tm.makeIntIndex(100), + rangeIndex=tm.makeRangeIndex(100), floatIndex=tm.makeFloatIndex(100), boolIndex=Index([True, False]), catIndex=tm.makeCategoricalIndex(100), @@ -57,6 +57,15 @@ def setup_method(self, method): def create_index(self): return Index(list('abcde')) + def generate_index_types(self, skip_index_keys=[]): + """ + Return a generator of the various index types, leaving + out the ones with a key in skip_index_keys + """ + for key, idx in self.indices.items(): + if key not in skip_index_keys: + yield key, idx + def test_new_axis(self): new_index = self.dateIndex[None, :] assert new_index.ndim == 2 @@ -406,6 +415,27 @@ def test_constructor_dtypes_timedelta(self): pd.TimedeltaIndex(list(values), dtype=dtype)]: tm.assert_index_equal(res, idx) + def test_constructor_empty(self): + skip_index_keys = ["repeats", "periodIndex", "rangeIndex", + "tuples"] + for key, idx in self.generate_index_types(skip_index_keys): + empty = idx.__class__([]) + assert isinstance(empty, idx.__class__) + assert not len(empty) + + empty = PeriodIndex([], freq='B') + assert isinstance(empty, PeriodIndex) + assert not len(empty) + + empty = RangeIndex(step=1) + assert isinstance(empty, pd.RangeIndex) + assert not len(empty) + + empty = MultiIndex(levels=[[1, 2], ['blue', 'red']], + labels=[[], []]) + assert 
isinstance(empty, MultiIndex) + assert not len(empty) + def test_view_with_args(self): restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex', @@ -1034,6 +1064,27 @@ def test_symmetric_difference(self): assert tm.equalContents(result, expected) assert result.name == 'new_name' + def test_difference_type(self): + # GH 20040 + # If taking difference of a set and itself, it + # needs to preserve the type of the index + skip_index_keys = ['repeats'] + for key, idx in self.generate_index_types(skip_index_keys): + result = idx.difference(idx) + expected = idx.drop(idx) + tm.assert_index_equal(result, expected) + + def test_intersection_difference(self): + # GH 20040 + # Test that the intersection of an index with an + # empty index produces the same index as the difference + # of an index with itself. Test for all types + skip_index_keys = ['repeats'] + for key, idx in self.generate_index_types(skip_index_keys): + inter = idx.intersection(idx.drop(idx)) + diff = idx.difference(idx) + tm.assert_index_equal(inter, diff) + def test_is_numeric(self): assert not self.dateIndex.is_numeric() assert not self.strIndex.is_numeric()
- [x] closes #20040 - [x] tests added / passed - tests/indexes/test_base.py:test_difference_type - tests/indexes/test_base.py:test_intersection_difference - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Uses `Index._shallow_copy([])`, which means fixes are related to getting that to work right. Fundamental concept is that if the result of `Index.difference` is an empty index, then result should preserve type and attributes of the object. In addition, for `MultiIndex`, if result of intersection is an empty index, then the levels are preserved.
https://api.github.com/repos/pandas-dev/pandas/pulls/20062
2018-03-08T21:52:36Z
2018-03-16T22:05:18Z
2018-03-16T22:05:18Z
2018-03-16T22:10:05Z
Add tests for docstring Validation Script + py27 compat
diff --git a/pandas/tests/scripts/__init__.py b/pandas/tests/scripts/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/scripts/test_validate_docstrings.py b/pandas/tests/scripts/test_validate_docstrings.py new file mode 100644 index 0000000000000..1d35d5d30bba3 --- /dev/null +++ b/pandas/tests/scripts/test_validate_docstrings.py @@ -0,0 +1,552 @@ +import os +import sys + +import numpy as np +import pytest + + +class GoodDocStrings(object): + """ + Collection of good doc strings. + + This class contains a lot of docstrings that should pass the validation + script without any errors. + """ + + def plot(self, kind, color='blue', **kwargs): + """ + Generate a plot. + + Render the data in the Series as a matplotlib plot of the + specified kind. + + Parameters + ---------- + kind : str + Kind of matplotlib plot. + color : str, default 'blue' + Color name or rgb code. + **kwargs + These parameters will be passed to the matplotlib plotting + function. + """ + pass + + def sample(self): + """ + Generate and return a random number. + + The value is sampled from a continuous uniform distribution between + 0 and 1. + + Returns + ------- + float + Random number generated. + """ + return random.random() # noqa: F821 + + def random_letters(self): + """ + Generate and return a sequence of random letters. + + The length of the returned string is also random, and is also + returned. + + Returns + ------- + length : int + Length of the returned string. + letters : str + String of random letters. + """ + length = random.randint(1, 10) # noqa: F821 + letters = ''.join(random.choice(string.ascii_lowercase) # noqa: F821 + for i in range(length)) + return length, letters + + def sample_values(self): + """ + Generate an infinite sequence of random numbers. + + The values are sampled from a continuous uniform distribution between + 0 and 1. + + Yields + ------ + float + Random number generated. 
+ """ + while True: + yield random.random() # noqa: F821 + + def head(self): + """ + Return the first 5 elements of the Series. + + This function is mainly useful to preview the values of the + Series without displaying the whole of it. + + Returns + ------- + Series + Subset of the original series with the 5 first values. + + See Also + -------- + Series.tail : Return the last 5 elements of the Series. + Series.iloc : Return a slice of the elements in the Series, + which can also be used to return the first or last n. + """ + return self.iloc[:5] + + def head1(self, n=5): + """ + Return the first elements of the Series. + + This function is mainly useful to preview the values of the + Series without displaying the whole of it. + + Parameters + ---------- + n : int + Number of values to return. + + Returns + ------- + Series + Subset of the original series with the n first values. + + See Also + -------- + tail : Return the last n elements of the Series. + + Examples + -------- + >>> s = pd.Series(['Ant', 'Bear', 'Cow', 'Dog', 'Falcon']) + >>> s.head() + 0 Ant + 1 Bear + 2 Cow + 3 Dog + 4 Falcon + dtype: object + + With the `n` parameter, we can change the number of returned rows: + + >>> s.head(n=3) + 0 Ant + 1 Bear + 2 Cow + dtype: object + """ + return self.iloc[:n] + + def contains(self, pat, case=True, na=np.nan): + """ + Return whether each value contains `pat`. + + In this case, we are illustrating how to use sections, even + if the example is simple enough and does not require them. + + Parameters + ---------- + pat : str + Pattern to check for within each element. + case : bool, default True + Whether check should be done with case sensitivity. + na : object, default np.nan + Fill value for missing data. 
+ + Examples + -------- + >>> s = pd.Series(['Antelope', 'Lion', 'Zebra', np.nan]) + >>> s.str.contains(pat='a') + 0 False + 1 False + 2 True + 3 NaN + dtype: object + + **Case sensitivity** + + With `case_sensitive` set to `False` we can match `a` with both + `a` and `A`: + + >>> s.str.contains(pat='a', case=False) + 0 True + 1 False + 2 True + 3 NaN + dtype: object + + **Missing values** + + We can fill missing values in the output using the `na` parameter: + + >>> s.str.contains(pat='a', na=False) + 0 False + 1 False + 2 True + 3 False + dtype: bool + """ + pass + + +class BadGenericDocStrings(object): + """Everything here has a bad docstring + """ + + def func(self): + + """Some function. + + With several mistakes in the docstring. + + It has a blank like after the signature `def func():`. + + The text 'Some function' should go in the line after the + opening quotes of the docstring, not in the same line. + + There is a blank line between the docstring and the first line + of code `foo = 1`. + + The closing quotes should be in the next line, not in this one.""" + + foo = 1 + bar = 2 + return foo + bar + + def astype(self, dtype): + """ + Casts Series type. + + Verb in third-person of the present simple, should be infinitive. + """ + pass + + def astype1(self, dtype): + """ + Method to cast Series type. + + Does not start with verb. + """ + pass + + def astype2(self, dtype): + """ + Cast Series type + + Missing dot at the end. + """ + pass + + def astype3(self, dtype): + """ + Cast Series type from its current type to the new type defined in + the parameter dtype. + + Summary is too verbose and doesn't fit in a single line. + """ + pass + + def plot(self, kind, **kwargs): + """ + Generate a plot. + + Render the data in the Series as a matplotlib plot of the + specified kind. + + Note the blank line between the parameters title and the first + parameter. Also, note that after the name of the parameter `kind` + and before the colon, a space is missing. 
+ + Also, note that the parameter descriptions do not start with a + capital letter, and do not finish with a dot. + + Finally, the `**kwargs` parameter is missing. + + Parameters + ---------- + + kind: str + kind of matplotlib plot + """ + pass + + def method(self, foo=None, bar=None): + """ + A sample DataFrame method. + + Do not import numpy and pandas. + + Try to use meaningful data, when it makes the example easier + to understand. + + Try to avoid positional arguments like in `df.method(1)`. They + can be alright if previously defined with a meaningful name, + like in `present_value(interest_rate)`, but avoid them otherwise. + + When presenting the behavior with different parameters, do not place + all the calls one next to the other. Instead, add a short sentence + explaining what the example shows. + + Examples + -------- + >>> import numpy as np + >>> import pandas as pd + >>> df = pd.DataFrame(np.ones((3, 3)), + ... columns=('a', 'b', 'c')) + >>> df.all(1) + 0 True + 1 True + 2 True + dtype: bool + >>> df.all(bool_only=True) + Series([], dtype: bool) + """ + pass + + +class BadSummaries(object): + + def wrong_line(self): + """Exists on the wrong line""" + pass + + def no_punctuation(self): + """ + Has the right line but forgets punctuation + """ + pass + + def no_capitalization(self): + """ + provides a lowercase summary. + """ + pass + + def no_infinitive(self): + """ + Started with a verb that is not infinitive. + """ + + def multi_line(self): + """ + Extends beyond one line + which is not correct. + """ + + +class BadParameters(object): + """ + Everything here has a problem with its Parameters section. + """ + + def missing_params(self, kind, **kwargs): + """ + Lacks kwargs in Parameters. + + Parameters + ---------- + kind : str + Foo bar baz. + """ + + def bad_colon_spacing(self, kind): + """ + Has bad spacing in the type line. + + Parameters + ---------- + kind: str + Needs a space after kind. 
+ """ + + def no_description_period(self, kind): + """ + Forgets to add a period to the description. + + Parameters + ---------- + kind : str + Doesn't end with a dot + """ + + def parameter_capitalization(self, kind): + """ + Forgets to capitalize the description. + + Parameters + ---------- + kind : str + this is not capitalized. + """ + + def blank_lines(self, kind): + """ + Adds a blank line after the section header. + + Parameters + ---------- + + kind : str + Foo bar baz. + """ + pass + + +class BadReturns(object): + + def return_not_documented(self): + """ + Lacks section for Returns + """ + return "Hello world!" + + def yield_not_documented(self): + """ + Lacks section for Yields + """ + yield "Hello world!" + + def no_type(self): + """ + Returns documented but without type. + + Returns + ------- + Some value. + """ + return "Hello world!" + + def no_description(self): + """ + Provides type but no descrption. + + Returns + ------- + str + """ + return "Hello world!" + + def no_punctuation(self): + """ + Provides type and description but no period. + + Returns + ------- + str + A nice greeting + """ + return "Hello world!" + + +class TestValidator(object): + + @pytest.fixture(autouse=True, scope="class") + def import_scripts(self): + """ + Because the scripts directory is above the top level pandas package + we need to hack sys.path to know where to find that directory for + import. The below traverses up the file system to find the scripts + directory, adds to location to sys.path and imports the required + module into the global namespace before as part of class setup, + reverting those changes on teardown. 
+ """ + up = os.path.dirname + file_dir = up(os.path.abspath(__file__)) + script_dir = os.path.join(up(up(up(file_dir))), 'scripts') + sys.path.append(script_dir) + from validate_docstrings import validate_one + globals()['validate_one'] = validate_one + yield + sys.path.pop() + del globals()['validate_one'] + + def _import_path(self, klass=None, func=None): + """ + Build the required import path for tests in this module. + + Parameters + ---------- + klass : str + Class name of object in module. + func : str + Function name of object in module. + + Returns + ------- + str + Import path of specified object in this module + """ + base_path = 'pandas.tests.scripts.test_validate_docstrings' + if klass: + base_path = '.'.join([base_path, klass]) + if func: + base_path = '.'.join([base_path, func]) + + return base_path + + def test_good_class(self): + assert validate_one(self._import_path( # noqa: F821 + klass='GoodDocStrings')) == 0 + + @pytest.mark.parametrize("func", [ + 'plot', 'sample', 'random_letters', 'sample_values', 'head', 'head1', + 'contains']) + def test_good_functions(self, func): + assert validate_one(self._import_path( # noqa: F821 + klass='GoodDocStrings', func=func)) == 0 + + def test_bad_class(self): + assert validate_one(self._import_path( # noqa: F821 + klass='BadGenericDocStrings')) > 0 + + @pytest.mark.parametrize("func", [ + 'func', 'astype', 'astype1', 'astype2', 'astype3', 'plot', 'method']) + def test_bad_generic_functions(self, func): + assert validate_one(self._import_path( # noqa:F821 + klass='BadGenericDocStrings', func=func)) > 0 + + @pytest.mark.parametrize("klass,func,msgs", [ + # Summary tests + ('BadSummaries', 'wrong_line', + ('should start in the line immediately after the opening quotes',)), + ('BadSummaries', 'no_punctuation', + ('Summary does not end with a period',)), + ('BadSummaries', 'no_capitalization', + ('Summary does not start with a capital letter',)), + ('BadSummaries', 'no_capitalization', + ('Summary must start with 
infinitive verb',)), + ('BadSummaries', 'multi_line', + ('a short summary in a single line should be present',)), + # Parameters tests + ('BadParameters', 'missing_params', + ('Parameters {**kwargs} not documented',)), + ('BadParameters', 'bad_colon_spacing', + ('Parameters {kind} not documented', + 'Unknown parameters {kind: str}', + 'Parameter "kind: str" has no type')), + ('BadParameters', 'no_description_period', + ('Parameter "kind" description should finish with "."',)), + ('BadParameters', 'parameter_capitalization', + ('Parameter "kind" description should start with a capital letter',)), + pytest.param('BadParameters', 'blank_lines', ('No error yet?',), + marks=pytest.mark.xfail), + # Returns tests + ('BadReturns', 'return_not_documented', ('No Returns section found',)), + ('BadReturns', 'yield_not_documented', ('No Yields section found',)), + pytest.param('BadReturns', 'no_type', ('foo',), + marks=pytest.mark.xfail), + pytest.param('BadReturns', 'no_description', ('foo',), + marks=pytest.mark.xfail), + pytest.param('BadReturns', 'no_punctuation', ('foo',), + marks=pytest.mark.xfail) + ]) + def test_bad_examples(self, capsys, klass, func, msgs): + validate_one(self._import_path(klass=klass, func=func)) # noqa:F821 + err = capsys.readouterr().err + for msg in msgs: + assert msg in err diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index aed3eb2f1226d..cdea2d8b83abd 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -20,7 +20,6 @@ import functools import collections import argparse -import contextlib import pydoc import inspect import importlib @@ -35,9 +34,11 @@ sys.path.insert(0, os.path.join(BASE_PATH)) import pandas +from pandas.compat import signature sys.path.insert(1, os.path.join(BASE_PATH, 'doc', 'sphinxext')) from numpydoc.docscrape import NumpyDocString +from pandas.io.formats.printing import pprint_thing PRIVATE_CLASSES = ['NDFrame', 'IndexOpsMixin'] @@ -46,7 +47,7 @@ def 
_load_obj(obj_name): for maxsplit in range(1, obj_name.count('.') + 1): # TODO when py3 only replace by: module, *func_parts = ... - func_name_split = obj_name.rsplit('.', maxsplit=maxsplit) + func_name_split = obj_name.rsplit('.', maxsplit) module = func_name_split[0] func_parts = func_name_split[1:] try: @@ -107,7 +108,9 @@ def __len__(self): @property def is_function_or_method(self): - return inspect.isfunction(self.method_obj) + # TODO(py27): remove ismethod + return (inspect.isfunction(self.method_obj) + or inspect.ismethod(self.method_obj)) @property def source_file_name(self): @@ -181,18 +184,24 @@ def doc_parameters(self): @property def signature_parameters(self): - if (inspect.isclass(self.method_obj) - and self.method_name.split('.')[-1] in - self.method_obj._accessors): - # accessor classes have a signature, but don't want to show this - return tuple() + if inspect.isclass(self.method_obj): + if hasattr(self.method_obj, '_accessors') and ( + self.method_name.split('.')[-1] in + self.method_obj._accessors): + # accessor classes have a signature but don't want to show this + return tuple() try: - signature = inspect.signature(self.method_obj) + sig = signature(self.method_obj) except (TypeError, ValueError): # Some objects, mainly in C extensions do not support introspection # of the signature return tuple() - params = tuple(signature.parameters.keys()) + params = sig.args + if sig.varargs: + params.append("*" + sig.varargs) + if sig.keywords: + params.append("**" + sig.keywords) + params = tuple(params) if params and params[0] in ('self', 'cls'): return params[1:] return params @@ -204,10 +213,11 @@ def parameter_mismatches(self): doc_params = tuple(self.doc_parameters) missing = set(signature_params) - set(doc_params) if missing: - errs.append('Parameters {!r} not documented'.format(missing)) + errs.append( + 'Parameters {} not documented'.format(pprint_thing(missing))) extra = set(doc_params) - set(signature_params) if extra: - errs.append('Unknown 
parameters {!r}'.format(extra)) + errs.append('Unknown parameters {}'.format(pprint_thing(extra))) if (not missing and not extra and signature_params != doc_params and not (not signature_params and not doc_params)): errs.append('Wrong parameters order. ' + @@ -240,6 +250,14 @@ def examples(self): def returns(self): return self.doc['Returns'] + @property + def yields(self): + return self.doc['Yields'] + + @property + def method_source(self): + return inspect.getsource(self.method_obj) + @property def first_line_ends_in_dot(self): if self.doc: @@ -265,8 +283,7 @@ def examples_errors(self): error_msgs = '' for test in finder.find(self.raw_doc, self.method_name, globs=context): f = StringIO() - with contextlib.redirect_stdout(f): - runner.run(test) + runner.run(test, out=f.write) error_msgs += f.getvalue() return error_msgs @@ -380,6 +397,19 @@ def validate_all(): def validate_one(func_name): + """ + Validate the docstring for the given func_name + + Parameters + ---------- + func_name : function + Function whose docstring will be evaluated + + Returns + ------- + int + The number of errors found in the `func_name` docstring + """ func_obj = _load_obj(func_name) doc = Docstring(func_name, func_obj) @@ -387,6 +417,7 @@ def validate_one(func_name): sys.stderr.write('{}\n'.format(doc.clean_doc)) errs = [] + wrns = [] if doc.start_blank_lines != 1: errs.append('Docstring text (summary) should start in the line ' 'immediately after the opening quotes (not in the same ' @@ -405,25 +436,26 @@ def validate_one(func_name): 'should be present at the beginning of the docstring)') else: if not doc.summary[0].isupper(): - errs.append('Summary does not start with capital') + errs.append('Summary does not start with a capital letter') if doc.summary[-1] != '.': - errs.append('Summary does not end with dot') + errs.append('Summary does not end with a period') if (doc.is_function_or_method and doc.summary.split(' ')[0][-1] == 's'): errs.append('Summary must start with infinitive verb, 
' 'not third person (e.g. use "Generate" instead of ' '"Generates")') if not doc.extended_summary: - errs.append('No extended summary found') + wrns.append('No extended summary found') param_errs = doc.parameter_mismatches for param in doc.doc_parameters: - if not doc.parameter_type(param): - param_errs.append('Parameter "{}" has no type'.format(param)) - else: - if doc.parameter_type(param)[-1] == '.': - param_errs.append('Parameter "{}" type ' - 'should not finish with "."'.format(param)) + if not param.startswith("*"): # Check can ignore var / kwargs + if not doc.parameter_type(param): + param_errs.append('Parameter "{}" has no type'.format(param)) + else: + if doc.parameter_type(param)[-1] == '.': + param_errs.append('Parameter "{}" type should ' + 'not finish with "."'.format(param)) if not doc.parameter_desc(param): param_errs.append('Parameter "{}" ' @@ -431,7 +463,7 @@ def validate_one(func_name): else: if not doc.parameter_desc(param)[0].isupper(): param_errs.append('Parameter "{}" description ' - 'should start with ' + 'should start with a ' 'capital letter'.format(param)) if doc.parameter_desc(param)[-1] != '.': param_errs.append('Parameter "{}" description ' @@ -441,8 +473,11 @@ def validate_one(func_name): for param_err in param_errs: errs.append('\t{}'.format(param_err)) - if not doc.returns: - errs.append('No returns section found') + if doc.is_function_or_method: + if not doc.returns and "return" in doc.method_source: + errs.append('No Returns section found') + if not doc.yields and "yield" in doc.method_source: + errs.append('No Yields section found') mentioned_errs = doc.mentioned_private_classes if mentioned_errs: @@ -450,7 +485,7 @@ def validate_one(func_name): 'docstring.'.format(mentioned_errs)) if not doc.see_also: - errs.append('See Also section not found') + wrns.append('See Also section not found') else: for rel_name, rel_desc in doc.see_also.items(): if not rel_desc: @@ -464,7 +499,7 @@ def validate_one(func_name): examples_errs = '' if 
not doc.examples: - errs.append('No examples section found') + wrns.append('No examples section found') else: examples_errs = doc.examples_errors if examples_errs: @@ -475,7 +510,12 @@ def validate_one(func_name): sys.stderr.write('Errors found:\n') for err in errs: sys.stderr.write('\t{}\n'.format(err)) - else: + if wrns: + sys.stderr.write('Warnings found:\n') + for wrn in wrns: + sys.stderr.write('\t{}\n'.format(wrn)) + + if not errs: sys.stderr.write('Docstring for "{}" correct. :)\n'.format(func_name)) if examples_errs:
- [x] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry @jorisvandenbossche @toobaz a few edits here should make it work for both Py3 and Py2 users
https://api.github.com/repos/pandas-dev/pandas/pulls/20061
2018-03-08T21:07:04Z
2018-08-17T19:37:33Z
2018-08-17T19:37:33Z
2018-11-05T19:21:25Z
TST: series/indexing tests parameterization + moving test methods
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py index 2629cfde9b4af..c1b6d0a452232 100644 --- a/pandas/tests/series/indexing/test_alter_index.py +++ b/pandas/tests/series/indexing/test_alter_index.py @@ -21,88 +21,72 @@ JOIN_TYPES = ['inner', 'outer', 'left', 'right'] -def test_align(test_data): - def _check_align(a, b, how='left', fill=None): - aa, ab = a.align(b, join=how, fill_value=fill) - - join_index = a.index.join(b.index, how=how) - if fill is not None: - diff_a = aa.index.difference(join_index) - diff_b = ab.index.difference(join_index) - if len(diff_a) > 0: - assert (aa.reindex(diff_a) == fill).all() - if len(diff_b) > 0: - assert (ab.reindex(diff_b) == fill).all() - - ea = a.reindex(join_index) - eb = b.reindex(join_index) - - if fill is not None: - ea = ea.fillna(fill) - eb = eb.fillna(fill) - - assert_series_equal(aa, ea) - assert_series_equal(ab, eb) - assert aa.name == 'ts' - assert ea.name == 'ts' - assert ab.name == 'ts' - assert eb.name == 'ts' - - for kind in JOIN_TYPES: - _check_align(test_data.ts[2:], test_data.ts[:-5], how=kind) - _check_align(test_data.ts[2:], test_data.ts[:-5], how=kind, fill=-1) - - # empty left - _check_align(test_data.ts[:0], test_data.ts[:-5], how=kind) - _check_align(test_data.ts[:0], test_data.ts[:-5], how=kind, fill=-1) - - # empty right - _check_align(test_data.ts[:-5], test_data.ts[:0], how=kind) - _check_align(test_data.ts[:-5], test_data.ts[:0], how=kind, fill=-1) - - # both empty - _check_align(test_data.ts[:0], test_data.ts[:0], how=kind) - _check_align(test_data.ts[:0], test_data.ts[:0], how=kind, fill=-1) - - -def test_align_fill_method(test_data): - def _check_align(a, b, how='left', method='pad', limit=None): - aa, ab = a.align(b, join=how, method=method, limit=limit) - - join_index = a.index.join(b.index, how=how) - ea = a.reindex(join_index) - eb = b.reindex(join_index) - - ea = ea.fillna(method=method, limit=limit) - eb = 
eb.fillna(method=method, limit=limit) - - assert_series_equal(aa, ea) - assert_series_equal(ab, eb) - - for kind in JOIN_TYPES: - for meth in ['pad', 'bfill']: - _check_align(test_data.ts[2:], test_data.ts[:-5], - how=kind, method=meth) - _check_align(test_data.ts[2:], test_data.ts[:-5], - how=kind, method=meth, limit=1) - - # empty left - _check_align(test_data.ts[:0], test_data.ts[:-5], - how=kind, method=meth) - _check_align(test_data.ts[:0], test_data.ts[:-5], - how=kind, method=meth, limit=1) - - # empty right - _check_align(test_data.ts[:-5], test_data.ts[:0], - how=kind, method=meth) - _check_align(test_data.ts[:-5], test_data.ts[:0], - how=kind, method=meth, limit=1) - - # both empty - _check_align(test_data.ts[:0], test_data.ts[:0], - how=kind, method=meth) - _check_align(test_data.ts[:0], test_data.ts[:0], - how=kind, method=meth, limit=1) +@pytest.mark.parametrize( + 'first_slice,second_slice', [ + [[2, None], [None, -5]], + [[None, 0], [None, -5]], + [[None, -5], [None, 0]], + [[None, 0], [None, 0]] + ]) +@pytest.mark.parametrize('join_type', JOIN_TYPES) +@pytest.mark.parametrize('fill', [None, -1]) +def test_align(test_data, first_slice, second_slice, join_type, fill): + a = test_data.ts[slice(*first_slice)] + b = test_data.ts[slice(*second_slice)] + + aa, ab = a.align(b, join=join_type, fill_value=fill) + + join_index = a.index.join(b.index, how=join_type) + if fill is not None: + diff_a = aa.index.difference(join_index) + diff_b = ab.index.difference(join_index) + if len(diff_a) > 0: + assert (aa.reindex(diff_a) == fill).all() + if len(diff_b) > 0: + assert (ab.reindex(diff_b) == fill).all() + + ea = a.reindex(join_index) + eb = b.reindex(join_index) + + if fill is not None: + ea = ea.fillna(fill) + eb = eb.fillna(fill) + + assert_series_equal(aa, ea) + assert_series_equal(ab, eb) + assert aa.name == 'ts' + assert ea.name == 'ts' + assert ab.name == 'ts' + assert eb.name == 'ts' + + +@pytest.mark.parametrize( + 'first_slice,second_slice', [ + [[2, 
None], [None, -5]], + [[None, 0], [None, -5]], + [[None, -5], [None, 0]], + [[None, 0], [None, 0]] + ]) +@pytest.mark.parametrize('join_type', JOIN_TYPES) +@pytest.mark.parametrize('method', ['pad', 'bfill']) +@pytest.mark.parametrize('limit', [None, 1]) +def test_align_fill_method(test_data, + first_slice, second_slice, + join_type, method, limit): + a = test_data.ts[slice(*first_slice)] + b = test_data.ts[slice(*second_slice)] + + aa, ab = a.align(b, join=join_type, method=method, limit=limit) + + join_index = a.index.join(b.index, how=join_type) + ea = a.reindex(join_index) + eb = b.reindex(join_index) + + ea = ea.fillna(method=method, limit=limit) + eb = eb.fillna(method=method, limit=limit) + + assert_series_equal(aa, ea) + assert_series_equal(ab, eb) def test_align_nocopy(test_data): @@ -481,3 +465,56 @@ def test_rename(): assert_series_equal(result, expected) assert result.name == expected.name + + +def test_drop(): + # unique + s = Series([1, 2], index=['one', 'two']) + expected = Series([1], index=['one']) + result = s.drop(['two']) + assert_series_equal(result, expected) + result = s.drop('two', axis='rows') + assert_series_equal(result, expected) + + # non-unique + # GH 5248 + s = Series([1, 1, 2], index=['one', 'two', 'one']) + expected = Series([1, 2], index=['one', 'one']) + result = s.drop(['two'], axis=0) + assert_series_equal(result, expected) + result = s.drop('two') + assert_series_equal(result, expected) + + expected = Series([1], index=['two']) + result = s.drop(['one']) + assert_series_equal(result, expected) + result = s.drop('one') + assert_series_equal(result, expected) + + # single string/tuple-like + s = Series(range(3), index=list('abc')) + pytest.raises(KeyError, s.drop, 'bc') + pytest.raises(KeyError, s.drop, ('a',)) + + # errors='ignore' + s = Series(range(3), index=list('abc')) + result = s.drop('bc', errors='ignore') + assert_series_equal(result, s) + result = s.drop(['a', 'd'], errors='ignore') + expected = s.iloc[1:] + 
assert_series_equal(result, expected) + + # bad axis + pytest.raises(ValueError, s.drop, 'one', axis='columns') + + # GH 8522 + s = Series([2, 3], index=[True, False]) + assert s.index.is_object() + result = s.drop(True) + expected = Series([3], index=[False]) + assert_series_equal(result, expected) + + # GH 16877 + s = Series([2, 3], index=[0, 1]) + with tm.assert_raises_regex(KeyError, 'not contained in axis'): + s.drop([False, True]) diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py index 75aa2898ae773..f1f4a5a05697d 100644 --- a/pandas/tests/series/indexing/test_boolean.py +++ b/pandas/tests/series/indexing/test_boolean.py @@ -283,34 +283,30 @@ def test_where_error(): []) -def test_where_array_like(): +@pytest.mark.parametrize('klass', [list, tuple, np.array, Series]) +def test_where_array_like(klass): # see gh-15414 s = Series([1, 2, 3]) cond = [False, True, True] expected = Series([np.nan, 2, 3]) - klasses = [list, tuple, np.array, Series] - for klass in klasses: - result = s.where(klass(cond)) - assert_series_equal(result, expected) + result = s.where(klass(cond)) + assert_series_equal(result, expected) -def test_where_invalid_input(): +@pytest.mark.parametrize('cond', [ + [1, 0, 1], + Series([2, 5, 7]), + ["True", "False", "True"], + [Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")] +]) +def test_where_invalid_input(cond): # see gh-15414: only boolean arrays accepted s = Series([1, 2, 3]) msg = "Boolean array expected for the condition" - conds = [ - [1, 0, 1], - Series([2, 5, 7]), - ["True", "False", "True"], - [Timestamp("2017-01-01"), - pd.NaT, Timestamp("2017-01-02")] - ] - - for cond in conds: - with tm.assert_raises_regex(ValueError, msg): - s.where(cond) + with tm.assert_raises_regex(ValueError, msg): + s.where(cond) msg = "Array conditional must be same shape as self" with tm.assert_raises_regex(ValueError, msg): @@ -403,37 +399,43 @@ def f(): assert_series_equal(s, expected) -def 
test_where_broadcast(): - # Test a variety of differently sized series - for size in range(2, 6): - # Test a variety of boolean indices - for selection in [ - # First element should be set - np.resize([True, False, False, False, False], size), - # Set alternating elements] - np.resize([True, False], size), - # No element should be set - np.resize([False], size) - ]: - - # Test a variety of different numbers as content - for item in [2.0, np.nan, np.finfo(np.float).max, - np.finfo(np.float).min]: - # Test numpy arrays, lists and tuples as the input to be - # broadcast - for arr in [np.array([item]), [item], (item,)]: - data = np.arange(size, dtype=float) - s = Series(data) - s[selection] = arr - # Construct the expected series by taking the source - # data or item based on the selection - expected = Series([item if use_item else data[ - i] for i, use_item in enumerate(selection)]) - assert_series_equal(s, expected) - - s = Series(data) - result = s.where(~selection, arr) - assert_series_equal(result, expected) +@pytest.mark.parametrize('size', range(2, 6)) +@pytest.mark.parametrize('mask', [ + [True, False, False, False, False], + [True, False], + [False] +]) +@pytest.mark.parametrize('item', [ + 2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min +]) +# Test numpy arrays, lists and tuples as the input to be +# broadcast +@pytest.mark.parametrize('box', [ + lambda x: np.array([x]), + lambda x: [x], + lambda x: (x,) +]) +def test_broadcast(size, mask, item, box): + selection = np.resize(mask, size) + + data = np.arange(size, dtype=float) + + # Construct the expected series by taking the source + # data or item based on the selection + expected = Series([item if use_item else data[ + i] for i, use_item in enumerate(selection)]) + + s = Series(data) + s[selection] = box(item) + assert_series_equal(s, expected) + + s = Series(data) + result = s.where(~selection, box(item)) + assert_series_equal(result, expected) + + s = Series(data) + result = s.mask(selection, 
box(item)) + assert_series_equal(result, expected) def test_where_inplace(): @@ -587,29 +589,6 @@ def test_mask(): assert_series_equal(result, expected) -def test_mask_broadcast(): - # GH 8801 - # copied from test_where_broadcast - for size in range(2, 6): - for selection in [ - # First element should be set - np.resize([True, False, False, False, False], size), - # Set alternating elements] - np.resize([True, False], size), - # No element should be set - np.resize([False], size) - ]: - for item in [2.0, np.nan, np.finfo(np.float).max, - np.finfo(np.float).min]: - for arr in [np.array([item]), [item], (item,)]: - data = np.arange(size, dtype=float) - s = Series(data) - result = s.mask(selection, arr) - expected = Series([item if use_item else data[ - i] for i, use_item in enumerate(selection)]) - assert_series_equal(result, expected) - - def test_mask_inplace(): s = Series(np.random.randn(5)) cond = s > 0 diff --git a/pandas/tests/series/indexing/test_callable.py b/pandas/tests/series/indexing/test_callable.py new file mode 100644 index 0000000000000..b656137545903 --- /dev/null +++ b/pandas/tests/series/indexing/test_callable.py @@ -0,0 +1,33 @@ +import pandas as pd +import pandas.util.testing as tm + + +def test_getitem_callable(): + # GH 12533 + s = pd.Series(4, index=list('ABCD')) + result = s[lambda x: 'A'] + assert result == s.loc['A'] + + result = s[lambda x: ['A', 'B']] + tm.assert_series_equal(result, s.loc[['A', 'B']]) + + result = s[lambda x: [True, False, True, True]] + tm.assert_series_equal(result, s.iloc[[0, 2, 3]]) + + +def test_setitem_callable(): + # GH 12533 + s = pd.Series([1, 2, 3, 4], index=list('ABCD')) + s[lambda x: 'A'] = -1 + tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD'))) + + +def test_setitem_other_callable(): + # GH 13299 + inc = lambda x: x + 1 + + s = pd.Series([1, 2, -1, 4]) + s[s < 0] = inc + + expected = pd.Series([1, 2, inc, 4]) + tm.assert_series_equal(s, expected) diff --git 
a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index db8118384f6f6..f484cdea2e09f 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -700,11 +700,11 @@ def test_nat_operations(): assert s.max() == exp -def test_round_nat(): +@pytest.mark.parametrize('method', ["round", "floor", "ceil"]) +@pytest.mark.parametrize('freq', ["s", "5s", "min", "5min", "h", "5h"]) +def test_round_nat(method, freq): # GH14940 s = Series([pd.NaT]) expected = Series(pd.NaT) - for method in ["round", "floor", "ceil"]: - round_method = getattr(s.dt, method) - for freq in ["s", "5s", "min", "5min", "h", "5h"]: - assert_series_equal(round_method(freq), expected) + round_method = getattr(s.dt, method) + assert_series_equal(round_method(freq), expected) diff --git a/pandas/tests/series/indexing/test_iloc.py b/pandas/tests/series/indexing/test_iloc.py index 5908a7708c426..648a37ce0262b 100644 --- a/pandas/tests/series/indexing/test_iloc.py +++ b/pandas/tests/series/indexing/test_iloc.py @@ -9,8 +9,6 @@ from pandas.util.testing import (assert_series_equal, assert_almost_equal) -JOIN_TYPES = ['inner', 'outer', 'left', 'right'] - def test_iloc(): s = Series(np.random.randn(10), index=lrange(0, 20, 2)) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 9005ac8e97929..5cc1a8ff1c451 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -21,7 +21,58 @@ import pandas.util.testing as tm -JOIN_TYPES = ['inner', 'outer', 'left', 'right'] +def test_basic_indexing(): + s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b']) + + pytest.raises(IndexError, s.__getitem__, 5) + pytest.raises(IndexError, s.__setitem__, 5, 0) + + pytest.raises(KeyError, s.__getitem__, 'c') + + s = s.sort_index() + + pytest.raises(IndexError, s.__getitem__, 5) + pytest.raises(IndexError, 
s.__setitem__, 5, 0) + + +def test_basic_getitem_with_labels(test_data): + indices = test_data.ts.index[[5, 10, 15]] + + result = test_data.ts[indices] + expected = test_data.ts.reindex(indices) + assert_series_equal(result, expected) + + result = test_data.ts[indices[0]:indices[2]] + expected = test_data.ts.loc[indices[0]:indices[2]] + assert_series_equal(result, expected) + + # integer indexes, be careful + s = Series(np.random.randn(10), index=lrange(0, 20, 2)) + inds = [0, 2, 5, 7, 8] + arr_inds = np.array([0, 2, 5, 7, 8]) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = s[inds] + expected = s.reindex(inds) + assert_series_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = s[arr_inds] + expected = s.reindex(arr_inds) + assert_series_equal(result, expected) + + # GH12089 + # with tz for values + s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"), + index=['a', 'b', 'c']) + expected = Timestamp('2011-01-01', tz='US/Eastern') + result = s.loc['a'] + assert result == expected + result = s.iloc[0] + assert result == expected + result = s['a'] + assert result == expected def test_getitem_setitem_ellipsis(): @@ -36,18 +87,6 @@ def test_getitem_setitem_ellipsis(): assert (result == 5).all() -def test_pop(): - # GH 6600 - df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, }) - k = df.iloc[4] - - result = k.pop('B') - assert result == 4 - - expected = Series([0, 0], index=['A', 'C'], name=4) - assert_series_equal(k, expected) - - def test_getitem_get(test_data): test_series = test_data.series test_obj_series = test_data.objSeries @@ -75,11 +114,6 @@ def test_getitem_get(test_data): assert result is None -def test_getitem_int64(test_data): - idx = np.int64(5) - assert test_data.ts[idx] == test_data.ts[5] - - def test_getitem_fancy(test_data): slice1 = test_data.series[[1, 2, 3]] slice2 = test_data.objSeries[[1, 2, 3]] @@ -199,26 +233,6 @@ def 
test_getitem_dups(): assert_series_equal(result, expected) -def test_getitem_dataframe(): - rng = list(range(10)) - s = pd.Series(10, index=rng) - df = pd.DataFrame(rng, index=rng) - pytest.raises(TypeError, s.__getitem__, df > 5) - - -def test_getitem_callable(): - # GH 12533 - s = pd.Series(4, index=list('ABCD')) - result = s[lambda x: 'A'] - assert result == s.loc['A'] - - result = s[lambda x: ['A', 'B']] - tm.assert_series_equal(result, s.loc[['A', 'B']]) - - result = s[lambda x: [True, False, True, True]] - tm.assert_series_equal(result, s.iloc[[0, 2, 3]]) - - def test_setitem_ambiguous_keyerror(): s = Series(lrange(10), index=lrange(0, 20, 2)) @@ -234,48 +248,11 @@ def test_setitem_ambiguous_keyerror(): assert_series_equal(s2, expected) -def test_setitem_callable(): - # GH 12533 - s = pd.Series([1, 2, 3, 4], index=list('ABCD')) - s[lambda x: 'A'] = -1 - tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD'))) - - -def test_setitem_other_callable(): - # GH 13299 - inc = lambda x: x + 1 - - s = pd.Series([1, 2, -1, 4]) - s[s < 0] = inc - - expected = pd.Series([1, 2, inc, 4]) - tm.assert_series_equal(s, expected) - - -def test_slice(test_data): - numSlice = test_data.series[10:20] - numSliceEnd = test_data.series[-10:] - objSlice = test_data.objSeries[10:20] - - assert test_data.series.index[9] not in numSlice.index - assert test_data.objSeries.index[9] not in objSlice.index - - assert len(numSlice) == len(numSlice.index) - assert test_data.series[numSlice.index[0]] == numSlice[numSlice.index[0]] - - assert numSlice.index[1] == test_data.series.index[11] - assert tm.equalContents(numSliceEnd, np.array(test_data.series)[-10:]) - - # Test return view. - sl = test_data.series[10:20] - sl[:] = 0 - - assert (test_data.series[10:20] == 0).all() - - -def test_slice_can_reorder_not_uniquely_indexed(): - s = Series(1, index=['a', 'a', 'b', 'b', 'c']) - s[::-1] # it works! 
+def test_getitem_dataframe(): + rng = list(range(10)) + s = pd.Series(10, index=rng) + df = pd.DataFrame(rng, index=rng) + pytest.raises(TypeError, s.__getitem__, df > 5) def test_setitem(test_data): @@ -389,86 +366,46 @@ def test_basic_getitem_setitem_corner(test_data): [5, slice(None, None)], 2) -def test_basic_getitem_with_labels(test_data): - indices = test_data.ts.index[[5, 10, 15]] - - result = test_data.ts[indices] - expected = test_data.ts.reindex(indices) - assert_series_equal(result, expected) - - result = test_data.ts[indices[0]:indices[2]] - expected = test_data.ts.loc[indices[0]:indices[2]] - assert_series_equal(result, expected) - - # integer indexes, be careful - s = Series(np.random.randn(10), index=lrange(0, 20, 2)) - inds = [0, 2, 5, 7, 8] - arr_inds = np.array([0, 2, 5, 7, 8]) - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = s[inds] - expected = s.reindex(inds) - assert_series_equal(result, expected) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = s[arr_inds] - expected = s.reindex(arr_inds) - assert_series_equal(result, expected) - - # GH12089 - # with tz for values - s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"), - index=['a', 'b', 'c']) - expected = Timestamp('2011-01-01', tz='US/Eastern') - result = s.loc['a'] - assert result == expected - result = s.iloc[0] - assert result == expected - result = s['a'] - assert result == expected - - -def test_setitem_with_tz(): - for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']: - orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3, - tz=tz)) - assert orig.dtype == 'datetime64[ns, {0}]'.format(tz) +@pytest.mark.parametrize('tz', ['US/Eastern', 'UTC', 'Asia/Tokyo']) +def test_setitem_with_tz(tz): + orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3, + tz=tz)) + assert orig.dtype == 'datetime64[ns, {0}]'.format(tz) - # scalar - s = orig.copy() - s[1] = pd.Timestamp('2011-01-01', tz=tz) 
- exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz), - pd.Timestamp('2011-01-01 00:00', tz=tz), - pd.Timestamp('2016-01-01 02:00', tz=tz)]) - tm.assert_series_equal(s, exp) + # scalar + s = orig.copy() + s[1] = pd.Timestamp('2011-01-01', tz=tz) + exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz), + pd.Timestamp('2011-01-01 00:00', tz=tz), + pd.Timestamp('2016-01-01 02:00', tz=tz)]) + tm.assert_series_equal(s, exp) - s = orig.copy() - s.loc[1] = pd.Timestamp('2011-01-01', tz=tz) - tm.assert_series_equal(s, exp) + s = orig.copy() + s.loc[1] = pd.Timestamp('2011-01-01', tz=tz) + tm.assert_series_equal(s, exp) - s = orig.copy() - s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz) - tm.assert_series_equal(s, exp) + s = orig.copy() + s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz) + tm.assert_series_equal(s, exp) - # vector - vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2]) - assert vals.dtype == 'datetime64[ns, {0}]'.format(tz) + # vector + vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz), + pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2]) + assert vals.dtype == 'datetime64[ns, {0}]'.format(tz) - s[[1, 2]] = vals - exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz), - pd.Timestamp('2011-01-01 00:00', tz=tz), - pd.Timestamp('2012-01-01 00:00', tz=tz)]) - tm.assert_series_equal(s, exp) + s[[1, 2]] = vals + exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz), + pd.Timestamp('2011-01-01 00:00', tz=tz), + pd.Timestamp('2012-01-01 00:00', tz=tz)]) + tm.assert_series_equal(s, exp) - s = orig.copy() - s.loc[[1, 2]] = vals - tm.assert_series_equal(s, exp) + s = orig.copy() + s.loc[[1, 2]] = vals + tm.assert_series_equal(s, exp) - s = orig.copy() - s.iloc[[1, 2]] = vals - tm.assert_series_equal(s, exp) + s = orig.copy() + s.iloc[[1, 2]] = vals + tm.assert_series_equal(s, exp) def test_setitem_with_tz_dst(): @@ -550,22 +487,30 @@ def test_categorial_assigning_ops(): tm.assert_series_equal(s, exp) -def 
test_take(): - s = Series([-1, 5, 6, 2, 4]) +def test_slice(test_data): + numSlice = test_data.series[10:20] + numSliceEnd = test_data.series[-10:] + objSlice = test_data.objSeries[10:20] - actual = s.take([1, 3, 4]) - expected = Series([5, 2, 4], index=[1, 3, 4]) - tm.assert_series_equal(actual, expected) + assert test_data.series.index[9] not in numSlice.index + assert test_data.objSeries.index[9] not in objSlice.index - actual = s.take([-1, 3, 4]) - expected = Series([4, 2, 4], index=[4, 3, 4]) - tm.assert_series_equal(actual, expected) + assert len(numSlice) == len(numSlice.index) + assert test_data.series[numSlice.index[0]] == numSlice[numSlice.index[0]] - pytest.raises(IndexError, s.take, [1, 10]) - pytest.raises(IndexError, s.take, [2, 5]) + assert numSlice.index[1] == test_data.series.index[11] + assert tm.equalContents(numSliceEnd, np.array(test_data.series)[-10:]) - with tm.assert_produces_warning(FutureWarning): - s.take([-1, 3, 4], convert=False) + # Test return view. + sl = test_data.series[10:20] + sl[:] = 0 + + assert (test_data.series[10:20] == 0).all() + + +def test_slice_can_reorder_not_uniquely_indexed(): + s = Series(1, index=['a', 'a', 'b', 'b', 'c']) + s[::-1] # it works! 
def test_ix_setitem(test_data): @@ -615,20 +560,6 @@ def test_setitem_na(): assert_series_equal(s, expected) -def test_basic_indexing(): - s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b']) - - pytest.raises(IndexError, s.__getitem__, 5) - pytest.raises(IndexError, s.__setitem__, 5, 0) - - pytest.raises(KeyError, s.__getitem__, 'c') - - s = s.sort_index() - - pytest.raises(IndexError, s.__getitem__, 5) - pytest.raises(IndexError, s.__setitem__, 5, 0) - - def test_timedelta_assignment(): # GH 8209 s = Series([]) @@ -700,73 +631,6 @@ def test_preserve_refs(test_data): assert not np.isnan(test_data.ts[10]) -def test_drop(): - # unique - s = Series([1, 2], index=['one', 'two']) - expected = Series([1], index=['one']) - result = s.drop(['two']) - assert_series_equal(result, expected) - result = s.drop('two', axis='rows') - assert_series_equal(result, expected) - - # non-unique - # GH 5248 - s = Series([1, 1, 2], index=['one', 'two', 'one']) - expected = Series([1, 2], index=['one', 'one']) - result = s.drop(['two'], axis=0) - assert_series_equal(result, expected) - result = s.drop('two') - assert_series_equal(result, expected) - - expected = Series([1], index=['two']) - result = s.drop(['one']) - assert_series_equal(result, expected) - result = s.drop('one') - assert_series_equal(result, expected) - - # single string/tuple-like - s = Series(range(3), index=list('abc')) - pytest.raises(KeyError, s.drop, 'bc') - pytest.raises(KeyError, s.drop, ('a',)) - - # errors='ignore' - s = Series(range(3), index=list('abc')) - result = s.drop('bc', errors='ignore') - assert_series_equal(result, s) - result = s.drop(['a', 'd'], errors='ignore') - expected = s.iloc[1:] - assert_series_equal(result, expected) - - # bad axis - pytest.raises(ValueError, s.drop, 'one', axis='columns') - - # GH 8522 - s = Series([2, 3], index=[True, False]) - assert s.index.is_object() - result = s.drop(True) - expected = Series([3], index=[False]) - assert_series_equal(result, expected) - - # 
GH 16877 - s = Series([2, 3], index=[0, 1]) - with tm.assert_raises_regex(KeyError, 'not contained in axis'): - s.drop([False, True]) - - -def test_select(test_data): - # deprecated: gh-12410 - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - n = len(test_data.ts) - result = test_data.ts.select(lambda x: x >= test_data.ts.index[n // 2]) - expected = test_data.ts.reindex(test_data.ts.index[n // 2:]) - assert_series_equal(result, expected) - - result = test_data.ts.select(lambda x: x.weekday() == 2) - expected = test_data.ts[test_data.ts.index.weekday == 2] - assert_series_equal(result, expected) - - def test_cast_on_putmask(): # GH 2746 @@ -799,13 +663,6 @@ def test_type_promote_putmask(): assert_series_equal(s, Series([0, 'foo', 'bar', 0])) -def test_head_tail(test_data): - assert_series_equal(test_data.series.head(), test_data.series[:5]) - assert_series_equal(test_data.series.head(0), test_data.series[0:0]) - assert_series_equal(test_data.series.tail(), test_data.series[-5:]) - assert_series_equal(test_data.series.tail(0), test_data.series[0:0]) - - def test_multilevel_preserve_name(): index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], @@ -845,3 +702,59 @@ def test_setitem_slice_into_readonly_backing_data(): series[1:3] = 1 assert not array.any() + + +""" +miscellaneous methods +""" + + +def test_select(test_data): + # deprecated: gh-12410 + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + n = len(test_data.ts) + result = test_data.ts.select(lambda x: x >= test_data.ts.index[n // 2]) + expected = test_data.ts.reindex(test_data.ts.index[n // 2:]) + assert_series_equal(result, expected) + + result = test_data.ts.select(lambda x: x.weekday() == 2) + expected = test_data.ts[test_data.ts.index.weekday == 2] + assert_series_equal(result, expected) + + +def test_pop(): + # GH 6600 + df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, }) + k = df.iloc[4] + + result = 
k.pop('B') + assert result == 4 + + expected = Series([0, 0], index=['A', 'C'], name=4) + assert_series_equal(k, expected) + + +def test_take(): + s = Series([-1, 5, 6, 2, 4]) + + actual = s.take([1, 3, 4]) + expected = Series([5, 2, 4], index=[1, 3, 4]) + tm.assert_series_equal(actual, expected) + + actual = s.take([-1, 3, 4]) + expected = Series([4, 2, 4], index=[4, 3, 4]) + tm.assert_series_equal(actual, expected) + + pytest.raises(IndexError, s.take, [1, 10]) + pytest.raises(IndexError, s.take, [2, 5]) + + with tm.assert_produces_warning(FutureWarning): + s.take([-1, 3, 4], convert=False) + + +def test_head_tail(test_data): + assert_series_equal(test_data.series.head(), test_data.series[:5]) + assert_series_equal(test_data.series.head(0), test_data.series[0:0]) + assert_series_equal(test_data.series.tail(), test_data.series[-5:]) + assert_series_equal(test_data.series.tail(0), test_data.series[0:0]) diff --git a/pandas/tests/series/indexing/test_loc.py b/pandas/tests/series/indexing/test_loc.py index d78b09a3c6ccb..088406e0a1db6 100644 --- a/pandas/tests/series/indexing/test_loc.py +++ b/pandas/tests/series/indexing/test_loc.py @@ -12,9 +12,6 @@ from pandas.util.testing import (assert_series_equal) -JOIN_TYPES = ['inner', 'outer', 'left', 'right'] - - def test_loc_getitem(test_data): inds = test_data.series.index[[3, 4, 7]] assert_series_equal( diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py index e6035ccf2d569..b964ec3874998 100644 --- a/pandas/tests/series/indexing/test_numeric.py +++ b/pandas/tests/series/indexing/test_numeric.py @@ -229,3 +229,8 @@ def test_int_indexing(): pytest.raises(KeyError, s.__getitem__, 5) pytest.raises(KeyError, s.__getitem__, 'c') + + +def test_getitem_int64(test_data): + idx = np.int64(5) + assert test_data.ts[idx] == test_data.ts[5]
Parts 2 and 3 of #20014. - tests parameterization - moving some tests from test_indexing.py to other files - test methods reordering in test_indexing.py - [x] closes #20014 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/20059
2018-03-08T20:10:05Z
2018-03-10T01:40:11Z
2018-03-10T01:40:11Z
2018-03-10T09:55:31Z
Small typo in apply.py
diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 9056f78ee02ed..8fb74e2e87174 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -191,7 +191,7 @@ def apply_broadcast(self, target): for i, col in enumerate(target.columns): res = self.f(target[col]) - ares = np. asarray(res).ndim + ares = np.asarray(res).ndim # must be a scalar or 1d if ares > 1:
`np. asarray(res)` --> `np.asarray(res)`
https://api.github.com/repos/pandas-dev/pandas/pulls/20058
2018-03-08T16:15:03Z
2018-03-09T11:30:22Z
2018-03-09T11:30:22Z
2018-03-09T18:04:23Z
DOC: Updating pandas.Interval docstring
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index c0b2ca66e30a6..f969c5db5b902 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -81,33 +81,79 @@ cdef class Interval(IntervalMixin): Parameters ---------- - left : value - Left bound for the interval - right : value - Right bound for the interval + left : orderable scalar + Left bound for the interval. + right : orderable scalar + Right bound for the interval. closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the interval is closed on the left-side, right-side, both or - neither + neither. + + Notes + ----- + The parameters `left` and `right` must be from the same type, you must be + able to compare them and they must satisfy ``left <= right``. Examples -------- + It is possible to build Intervals of different types, like numeric ones: + >>> iv = pd.Interval(left=0, right=5) >>> iv Interval(0, 5, closed='right') + + You can check if an element belongs to it + >>> 2.5 in iv True - >>> year_2017 = pd.Interval(pd.Timestamp('2017-01-01'), - ... pd.Timestamp('2017-12-31'), closed='both') + You can test the bounds + + >>> 0 in iv + False + >>> 5 in iv + True + + Calculate its length + + >>> iv.length + 5 + + You can operate with `+` and `*` over an Interval and the operation + is applied to each of its bounds, so the result depends on the type + of the bound elements + + >>> shifted_iv = iv + 3 + >>> shifted_iv + Interval(3, 8, closed='right') + >>> extended_iv = iv * 10.0 + >>> extended_iv + Interval(0.0, 50.0, closed='right') + + To create a time interval you can use Timestamps as the bounds + + >>> year_2017 = pd.Interval(pd.Timestamp('2017-01-01 00:00:00'), + ... pd.Timestamp('2018-01-01 00:00:00'), + ... 
closed='left') >>> pd.Timestamp('2017-01-01 00:00') in year_2017 True + >>> year_2017.length + Timedelta('365 days 00:00:00') + + And also you can create string intervals + + >>> volume_1 = pd.Interval('Ant', 'Dog', closed='both') + >>> 'Bee' in volume_1 + True See Also -------- IntervalIndex : An Index of Interval objects that are all closed on the - same side. - cut, qcut : Convert arrays of continuous data into Categoricals/Series of - Interval. + same side. + cut : Bin values into discrete intervals. + qcut : Discretize values into equal-sized buckets based on rank or + based on sample quantiles. + Period : Represents a period of time. """ _typ = "interval"
Docstring assigned to the Buenos Aires chapter for the sprint. Hi, I have a few doubts about the changes. First about the type of the parameters *left* and *right*, it's ok to refer to them as *orderable*? In the implementation, the constructor takes any pair of objects that satisfy `left <= right` and the comparison returns a boolean value. I tried to pass two numpy arrays and it breaks because of ambiguity, but still it's so general that you can even make Intervals of Intervals, sets or booleans. And I believe this is too permissive and prone to error. So what it's better, put a warning about this or add an example that is a little more complex and uses this capability (I tried to think one, but nothing came up to my mind). And last, validate_docstrings.py returned this error `2341, in _signature_from_callable 'no signature found for builtin type {!r}'.format(obj)) ValueError: no signature found for builtin type <class 'pandas._libs.interval.Interval'> ` Thanks, happy to collaborate. PS: Given that this is a Cython file I needed to recompile the project to see the changes in the documentation. I mention this because is not in the sprint guide and I don't know if I will able to add it before the event.
https://api.github.com/repos/pandas-dev/pandas/pulls/20057
2018-03-08T16:09:19Z
2018-03-14T13:11:50Z
2018-03-14T13:11:50Z
2018-03-14T13:12:04Z
Temporary github PR template for sprint
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 4e1e9ce017408..c1e02bd8eafc4 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,3 +1,27 @@ +Checklist for the pandas documentation sprint (ignore this if you are doing +an unrelated PR): + +- [ ] PR title is "DOC: update the <your-function-or-method> docstring" +- [ ] The validation script passes: `scripts/validate_docstrings.py <your-function-or-method>` +- [ ] The PEP8 style check passes: `git diff upstream/master -u -- "*.py" | flake8 --diff` +- [ ] The html version looks good: `python doc/make.py --single <your-function-or-method>` +- [ ] It has been proofread on language by another sprint participant + +Please include the output of the validation script below between the "```" ticks: + +``` +# paste output of "scripts/validate_docstrings.py <your-function-or-method>" here +# between the "```" (remove this comment, but keep the "```") + +``` + +If the validation script still gives errors, but you think there is a good reason +to deviate in this case (and there are certainly such cases), please state this +explicitly. + + +Checklist for other PRs (remove this part if you are doing a PR for the pandas documentation sprint): + - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
@datapythonista do you think this would be useful to repeat it once more here?
https://api.github.com/repos/pandas-dev/pandas/pulls/20055
2018-03-08T13:21:36Z
2018-03-09T09:03:45Z
2018-03-09T09:03:45Z
2018-03-09T09:03:48Z
TST: xfail test_time on py2 & mpl 1.4.3
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 3b70b3608a0bd..2f2931c9c86ac 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1041,7 +1041,7 @@ def test_irreg_dtypes(self): _, ax = self.plt.subplots() _check_plot_works(df.plot, ax=ax) - @td.xfail_if_mpl_2_2 + @pytest.mark.xfail(not PY3, reason="failing on mpl 1.4.3 on PY2") @pytest.mark.slow def test_time(self): t = datetime(1, 1, 1, 3, 30, 0)
https://api.github.com/repos/pandas-dev/pandas/pulls/20053
2018-03-08T11:27:19Z
2018-03-08T12:08:58Z
2018-03-08T12:08:58Z
2018-03-08T12:08:58Z
CLN: Preliminary formatter refactor
- [x] based on discussion here: https://github.com/pandas-dev/pandas/pull/20032#pullrequestreview-101917292 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This PR splits the various formatters into different files so that it's easier to refactor a specific formatter.
https://api.github.com/repos/pandas-dev/pandas/pulls/20051
2018-03-08T10:27:19Z
2018-03-14T08:32:10Z
2018-03-14T08:32:30Z
2018-03-14T08:32:30Z
DOC: Updated docstring DatetimeIndex.tz_localize
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6b97ee90cd93c..344ec9f3a89de 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1937,16 +1937,21 @@ def tz_convert(self, tz): mapping={True: 'infer', False: 'raise'}) def tz_localize(self, tz, ambiguous='raise', errors='raise'): """ - Localize tz-naive DatetimeIndex to given time zone (using - pytz/dateutil), or remove timezone from tz-aware DatetimeIndex + Localize tz-naive DatetimeIndex to tz-aware DatetimeIndex. + + This method takes a time zone (tz) naive DatetimeIndex object and + makes this time zone aware. It does not move the time to another + time zone. + Time zone localization helps to switch from time zone aware to time + zone unaware objects. Parameters ---------- tz : string, pytz.timezone, dateutil.tz.tzfile or None - Time zone for time. Corresponding timestamps would be converted to - time zone of the TimeSeries. - None will remove timezone holding local time. - ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' + Time zone to convert timestamps to. Passing ``None`` will + remove the time zone information preserving local time. + ambiguous : str {'infer', 'NaT', 'raise'} or bool array, \ +default 'raise' - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False signifies a @@ -1955,12 +1960,12 @@ def tz_localize(self, tz, ambiguous='raise', errors='raise'): - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times - errors : 'raise', 'coerce', default 'raise' + errors : {'raise', 'coerce'}, default 'raise' - 'raise' will raise a NonExistentTimeError if a timestamp is not - valid in the specified timezone (e.g. due to a transition from + valid in the specified time zone (e.g. 
due to a transition from or to DST time) - 'coerce' will return NaT if the timestamp can not be converted - into the specified timezone + to the specified time zone .. versionadded:: 0.19.0 @@ -1970,12 +1975,43 @@ def tz_localize(self, tz, ambiguous='raise', errors='raise'): Returns ------- - localized : DatetimeIndex + DatetimeIndex + Index converted to the specified time zone. Raises ------ TypeError If the DatetimeIndex is tz-aware and tz is not None. + + See Also + -------- + DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from + one time zone to another. + + Examples + -------- + >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3) + >>> tz_naive + DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', + '2018-03-03 09:00:00'], + dtype='datetime64[ns]', freq='D') + + Localize DatetimeIndex in US/Eastern time zone: + + >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern') + >>> tz_aware + DatetimeIndex(['2018-03-01 09:00:00-05:00', + '2018-03-02 09:00:00-05:00', + '2018-03-03 09:00:00-05:00'], + dtype='datetime64[ns, US/Eastern]', freq='D') + + With the ``tz=None``, we can remove the time zone information + while keeping the local time (not converted to UTC): + + >>> tz_aware.tz_localize(None) + DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', + '2018-03-03 09:00:00'], + dtype='datetime64[ns]', freq='D') """ if self.tz is not None: if tz is None:
Kindly review this docstring DatetimeIndex.tz_localize
https://api.github.com/repos/pandas-dev/pandas/pulls/20050
2018-03-08T10:22:44Z
2018-03-14T08:55:56Z
2018-03-14T08:55:56Z
2018-03-14T09:03:21Z
API: PeriodIndex subtraction to return object Index of DateOffsets
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index fd34424dedc52..80709c098036b 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -68,6 +68,7 @@ Datetimelike API Changes - For :class:`DatetimeIndex` and :class:`TimedeltaIndex` with non-``None`` ``freq`` attribute, addition or subtraction of integer-dtyped array or ``Index`` will return an object of the same class (:issue:`19959`) - :class:`DateOffset` objects are now immutable. Attempting to alter one of these will now raise ``AttributeError`` (:issue:`21341`) +- :class:`PeriodIndex` subtraction of another ``PeriodIndex`` will now return an object-dtype :class:`Index` of :class:`DateOffset` objects instead of raising a ``TypeError`` (:issue:`20049`) .. _whatsnew_0240.api.other: diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index c7cb245263df8..70c87b0a231c1 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -13,7 +13,8 @@ import numpy as np from pandas._libs import lib, iNaT, NaT, Timedelta -from pandas._libs.tslibs.period import Period +from pandas._libs.tslibs.period import (Period, IncompatibleFrequency, + _DIFFERENT_FREQ_INDEX) from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds from pandas._libs.tslibs.timestamps import round_ns @@ -784,6 +785,41 @@ def _sub_nat(self): def _sub_period(self, other): return NotImplemented + def _sub_period_array(self, other): + """ + Subtract one PeriodIndex from another. This is only valid if they + have the same frequency. 
+ + Parameters + ---------- + other : PeriodIndex + + Returns + ------- + result : np.ndarray[object] + Array of DateOffset objects; nulls represented by NaT + """ + if not is_period_dtype(self): + raise TypeError("cannot subtract {dtype}-dtype to {cls}" + .format(dtype=other.dtype, + cls=type(self).__name__)) + + if not len(self) == len(other): + raise ValueError("cannot subtract indices of unequal length") + if self.freq != other.freq: + msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr) + raise IncompatibleFrequency(msg) + + new_values = checked_add_with_arr(self.asi8, -other.asi8, + arr_mask=self._isnan, + b_mask=other._isnan) + + new_values = np.array([self.freq * x for x in new_values]) + if self.hasnans or other.hasnans: + mask = (self._isnan) | (other._isnan) + new_values[mask] = NaT + return new_values + def _add_offset(self, offset): raise com.AbstractMethodError(self) @@ -894,7 +930,7 @@ def __add__(self, other): return self._add_datelike(other) elif is_integer_dtype(other): result = self._addsub_int_array(other, operator.add) - elif is_float_dtype(other): + elif is_float_dtype(other) or is_period_dtype(other): # Explicitly catch invalid dtypes raise TypeError("cannot add {dtype}-dtype to {cls}" .format(dtype=other.dtype, @@ -953,6 +989,9 @@ def __sub__(self, other): elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other): # DatetimeIndex, ndarray[datetime64] result = self._sub_datelike(other) + elif is_period_dtype(other): + # PeriodIndex + result = self._sub_period_array(other) elif is_integer_dtype(other): result = self._addsub_int_array(other, operator.sub) elif isinstance(other, Index): diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index 0649083a440df..a0b894e73b41d 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -762,6 +762,17 @@ def test_sub_period(self, freq): with 
pytest.raises(TypeError): p - idx + @pytest.mark.parametrize('op', [operator.add, ops.radd, + operator.sub, ops.rsub]) + @pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H']) + @pytest.mark.parametrize('dti_freq', [None, 'D']) + def test_dti_sub_pi(self, dti_freq, pi_freq, op): + # GH#20049 subtracting PeriodIndex should raise TypeError + dti = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=dti_freq) + pi = dti.to_period(pi_freq) + with pytest.raises(TypeError): + op(dti, pi) + def test_ufunc_coercions(self): idx = date_range('2011-01-01', periods=3, freq='2D', name='x') diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py index aea019d910fe0..79f08ab490494 100644 --- a/pandas/tests/indexes/period/test_arithmetic.py +++ b/pandas/tests/indexes/period/test_arithmetic.py @@ -258,6 +258,57 @@ def test_comp_nat(self, dtype): class TestPeriodIndexArithmetic(object): + # --------------------------------------------------------------- + # __add__/__sub__ with PeriodIndex + # PeriodIndex + other is defined for integers and timedelta-like others + # PeriodIndex - other is defined for integers, timedelta-like others, + # and PeriodIndex (with matching freq) + + def test_pi_add_iadd_pi_raises(self): + rng = pd.period_range('1/1/2000', freq='D', periods=5) + other = pd.period_range('1/6/2000', freq='D', periods=5) + + # An earlier implementation of PeriodIndex addition performed + # a set operation (union). This has since been changed to + # raise a TypeError. See GH#14164 and GH#13077 for historical + # reference. + with pytest.raises(TypeError): + rng + other + + with pytest.raises(TypeError): + rng += other + + def test_pi_sub_isub_pi(self): + # GH#20049 + # For historical reference see GH#14164, GH#13077. 
+ # PeriodIndex subtraction originally performed set difference, + # then changed to raise TypeError before being implemented in GH#20049 + rng = pd.period_range('1/1/2000', freq='D', periods=5) + other = pd.period_range('1/6/2000', freq='D', periods=5) + + off = rng.freq + expected = pd.Index([-5 * off] * 5) + result = rng - other + tm.assert_index_equal(result, expected) + + rng -= other + tm.assert_index_equal(rng, expected) + + def test_pi_sub_pi_with_nat(self): + rng = pd.period_range('1/1/2000', freq='D', periods=5) + other = rng[1:].insert(0, pd.NaT) + assert other[1:].equals(rng[1:]) + + result = rng - other + off = rng.freq + expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off]) + tm.assert_index_equal(result, expected) + + def test_pi_sub_pi_mismatched_freq(self): + rng = pd.period_range('1/1/2000', freq='D', periods=5) + other = pd.period_range('1/6/2000', freq='H', periods=5) + with pytest.raises(period.IncompatibleFrequency): + rng - other # ------------------------------------------------------------- # Invalid Operations @@ -379,17 +430,6 @@ def test_pi_sub_offset_array(self, box): with tm.assert_produces_warning(PerformanceWarning): anchored - pi - def test_pi_add_iadd_pi_raises(self): - rng = pd.period_range('1/1/2000', freq='D', periods=5) - other = pd.period_range('1/6/2000', freq='D', periods=5) - - # previously performed setop union, now raises TypeError (GH14164) - with pytest.raises(TypeError): - rng + other - - with pytest.raises(TypeError): - rng += other - def test_pi_add_iadd_int(self, one): # Variants of `one` for #19012 rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10) @@ -419,18 +459,6 @@ def test_pi_sub_intlike(self, five): exp = rng + (-five) tm.assert_index_equal(result, exp) - def test_pi_sub_isub_pi_raises(self): - # previously performed setop, now raises TypeError (GH14164) - # TODO needs to wait on #13077 for decision on result type - rng = pd.period_range('1/1/2000', freq='D', periods=5) - other = 
pd.period_range('1/6/2000', freq='D', periods=5) - - with pytest.raises(TypeError): - rng - other - - with pytest.raises(TypeError): - rng -= other - def test_pi_sub_isub_offset(self): # offset # DateOffset diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 786ff5cde1806..d47d75d2f3485 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -305,6 +305,18 @@ def test_tdi_sub_period(self, freq): with pytest.raises(TypeError): p - idx + @pytest.mark.parametrize('op', [operator.add, ops.radd, + operator.sub, ops.rsub]) + @pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H']) + @pytest.mark.parametrize('tdi_freq', [None, 'H']) + def test_dti_sub_pi(self, tdi_freq, pi_freq, op): + # GH#20049 subtracting PeriodIndex should raise TypeError + tdi = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq) + dti = pd.Timestamp('2018-03-07 17:16:40') + tdi + pi = dti.to_period(pi_freq) + with pytest.raises(TypeError): + op(dti, pi) + # ------------------------------------------------------------- # TimedeltaIndex.shift is used by __add__/__sub__
Implements _sub_period_array in DatetimeIndexOpsMixin. Behavior is analogous to subtraction of Period scalar. - [x] closes #13077 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20049
2018-03-08T02:45:24Z
2018-06-29T00:45:49Z
2018-06-29T00:45:49Z
2020-04-05T17:41:09Z
Added 'displayed_only' option to 'read_html'
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 302f8043f3ba7..bea897e1b88e6 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -343,6 +343,7 @@ Other Enhancements - :meth:`Timestamp.day_name` and :meth:`DatetimeIndex.day_name` are now available to return day names with a specified locale (:issue:`12806`) - :meth:`DataFrame.to_sql` now performs a multivalue insert if the underlying connection supports itk rather than inserting row by row. ``SQLAlchemy`` dialects supporting multivalue inserts include: ``mysql``, ``postgresql``, ``sqlite`` and any dialect with ``supports_multivalues_insert``. (:issue:`14315`, :issue:`8953`) +- :func:`read_html` now accepts a ``displayed_only`` keyword argument to controls whether or not hidden elements are parsed (``True`` by default) (:issue:`20027`) .. _whatsnew_0230.api_breaking: diff --git a/pandas/io/html.py b/pandas/io/html.py index be4854bc19cc6..300a5a151f5d2 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -160,6 +160,14 @@ class _HtmlFrameParser(object): attrs : dict List of HTML <table> element attributes to match. + encoding : str + Encoding to be used by parser + + displayed_only : bool + Whether or not items with "display:none" should be ignored + + .. versionadded:: 0.23.0 + Attributes ---------- io : str or file-like @@ -172,6 +180,14 @@ class _HtmlFrameParser(object): A dictionary of valid table attributes to use to search for table elements. + encoding : str + Encoding to be used by parser + + displayed_only : bool + Whether or not items with "display:none" should be ignored + + .. versionadded:: 0.23.0 + Notes ----- To subclass this class effectively you must override the following methods: @@ -187,11 +203,12 @@ class _HtmlFrameParser(object): functionality. 
""" - def __init__(self, io, match, attrs, encoding): + def __init__(self, io, match, attrs, encoding, displayed_only): self.io = io self.match = match self.attrs = attrs self.encoding = encoding + self.displayed_only = displayed_only def parse_tables(self): tables = self._parse_tables(self._build_doc(), self.match, self.attrs) @@ -380,6 +397,27 @@ def _parse_raw_tbody(self, table): res = self._parse_tr(table) return self._parse_raw_data(res) + def _handle_hidden_tables(self, tbl_list, attr_name): + """Returns list of tables, potentially removing hidden elements + + Parameters + ---------- + tbl_list : list of Tag or list of Element + Type of list elements will vary depending upon parser used + attr_name : str + Name of the accessor for retrieving HTML attributes + + Returns + ------- + list of Tag or list of Element + Return type matches `tbl_list` + """ + if not self.displayed_only: + return tbl_list + + return [x for x in tbl_list if "display:none" not in + getattr(x, attr_name).get('style', '').replace(" ", "")] + class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser): """HTML to DataFrame parser that uses BeautifulSoup under the hood. @@ -431,8 +469,14 @@ def _parse_tables(self, doc, match, attrs): result = [] unique_tables = set() + tables = self._handle_hidden_tables(tables, "attrs") for table in tables: + if self.displayed_only: + for elem in table.find_all( + style=re.compile(r"display:\s*none")): + elem.decompose() + if (table not in unique_tables and table.find(text=match) is not None): result.append(table) @@ -528,6 +572,17 @@ def _parse_tables(self, doc, match, kwargs): tables = doc.xpath(xpath_expr, namespaces=_re_namespace) + tables = self._handle_hidden_tables(tables, "attrib") + if self.displayed_only: + for table in tables: + # lxml utilizes XPATH 1.0 which does not have regex + # support. 
As a result, we find all elements with a style + # attribute and iterate them to check for display:none + for elem in table.xpath('.//*[@style]'): + if "display:none" in elem.attrib.get( + "style", "").replace(" ", ""): + elem.getparent().remove(elem) + if not tables: raise ValueError("No tables found matching regex {patt!r}" .format(patt=pattern)) @@ -729,7 +784,7 @@ def _validate_flavor(flavor): return flavor -def _parse(flavor, io, match, attrs, encoding, **kwargs): +def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs): flavor = _validate_flavor(flavor) compiled_match = re.compile(match) # you can pass a compiled regex here @@ -737,7 +792,7 @@ def _parse(flavor, io, match, attrs, encoding, **kwargs): retained = None for flav in flavor: parser = _parser_dispatch(flav) - p = parser(io, compiled_match, attrs, encoding) + p = parser(io, compiled_match, attrs, encoding, displayed_only) try: tables = p.parse_tables() @@ -773,7 +828,7 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None, skiprows=None, attrs=None, parse_dates=False, tupleize_cols=None, thousands=',', encoding=None, decimal='.', converters=None, na_values=None, - keep_default_na=True): + keep_default_na=True, displayed_only=True): r"""Read HTML tables into a ``list`` of ``DataFrame`` objects. Parameters @@ -877,6 +932,11 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None, .. versionadded:: 0.19.0 + display_only : bool, default True + Whether elements with "display: none" should be parsed + + .. 
versionadded:: 0.23.0 + Returns ------- dfs : list of DataFrames @@ -924,4 +984,5 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None, parse_dates=parse_dates, tupleize_cols=tupleize_cols, thousands=thousands, attrs=attrs, encoding=encoding, decimal=decimal, converters=converters, na_values=na_values, - keep_default_na=keep_default_na) + keep_default_na=keep_default_na, + displayed_only=displayed_only) diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 151a0750b7f6e..b18104e951504 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -674,6 +674,39 @@ def test_wikipedia_states_table(self): result = self.read_html(data, 'Arizona', header=1)[0] assert result['sq mi'].dtype == np.dtype('float64') + @pytest.mark.parametrize("displayed_only,exp0,exp1", [ + (True, DataFrame(["foo"]), None), + (False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"]))]) + def test_displayed_only(self, displayed_only, exp0, exp1): + # GH 20027 + data = StringIO("""<html> + <body> + <table> + <tr> + <td> + foo + <span style="display:none;text-align:center">bar</span> + <span style="display:none">baz</span> + <span style="display: none">qux</span> + </td> + </tr> + </table> + <table style="display: none"> + <tr> + <td>foo</td> + </tr> + </table> + </body> + </html>""") + + dfs = self.read_html(data, displayed_only=displayed_only) + tm.assert_frame_equal(dfs[0], exp0) + + if exp1 is not None: + tm.assert_frame_equal(dfs[1], exp1) + else: + assert len(dfs) == 1 # Should not parse hidden table + def test_decimal_rows(self): # GH 12907 @@ -896,6 +929,39 @@ def test_computer_sales_page(self): data = os.path.join(DATA_PATH, 'computer_sales_page.html') self.read_html(data, header=[0, 1]) + @pytest.mark.parametrize("displayed_only,exp0,exp1", [ + (True, DataFrame(["foo"]), None), + (False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"]))]) + def test_displayed_only(self, displayed_only, exp0, exp1): + # GH 20027 + data = 
StringIO("""<html> + <body> + <table> + <tr> + <td> + foo + <span style="display:none;text-align:center">bar</span> + <span style="display:none">baz</span> + <span style="display: none">qux</span> + </td> + </tr> + </table> + <table style="display: none"> + <tr> + <td>foo</td> + </tr> + </table> + </body> + </html>""") + + dfs = self.read_html(data, displayed_only=displayed_only) + tm.assert_frame_equal(dfs[0], exp0) + + if exp1 is not None: + tm.assert_frame_equal(dfs[1], exp1) + else: + assert len(dfs) == 1 # Should not parse hidden table + def test_invalid_flavor(): url = 'google.com'
- [X] closes #20027 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20047
2018-03-07T23:52:29Z
2018-03-10T01:53:35Z
2018-03-10T01:53:35Z
2018-05-14T21:11:38Z
ENH: Allow rename_axis to specify index and columns arguments
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 835c4cc9d4ab3..608e2c8e72ded 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -503,6 +503,47 @@ method, allowing you to permute the hierarchical index levels in one step: df[:5].reorder_levels([1,0], axis=0) +.. _advanced.index_names: + +Renaming names of an ``Index`` or ``MultiIndex`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The :meth:`~DataFrame.rename` method is used to rename the labels of a +``MultiIndex``, and is typically used to rename the columns of a ``DataFrame``. +The ``columns`` argument of ``rename`` allows a dictionary to be specified +that includes only the columns you wish to rename. + +.. ipython:: python + + df.rename(columns={0: "col0", 1: "col1"}) + +This method can also be used to rename specific labels of the main index +of the ``DataFrame``. + +.. ipython:: python + + df.rename(index={"one" : "two", "y" : "z"}) + +The :meth:`~DataFrame.rename_axis` method is used to rename the name of a +``Index`` or ``MultiIndex``. In particular, the names of the levels of a +``MultiIndex`` can be specified, which is useful if ``reset_index()`` is later +used to move the values from the ``MultiIndex`` to a column. + +.. ipython:: python + + df.rename_axis(index=['abc', 'def']) + +Note that the columns of a ``DataFrame`` are an index, so that using +``rename_axis`` with the ``columns`` argument will change the name of that +index. + +.. ipython:: python + + df.rename_axis(columns="Cols").columns + +Both ``rename`` and ``rename_axis`` support specifying a dictionary, +``Series`` or a mapping function to map labels/names to new values. + Sorting a ``MultiIndex`` ------------------------ diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 6eeb97349100a..16ab345fd1744 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1466,8 +1466,21 @@ for altering the ``Series.name`` attribute. .. 
_basics.rename_axis: -The Panel class has a related :meth:`~Panel.rename_axis` class which can rename -any of its three axes. +.. versionadded:: 0.24.0 + +The methods :meth:`~DataFrame.rename_axis` and :meth:`~Series.rename_axis` +allow specific names of a `MultiIndex` to be changed (as opposed to the +labels). + +.. ipython:: python + + df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], + 'y': [10, 20, 30, 40, 50, 60]}, + index=pd.MultiIndex.from_product([['a', 'b', 'c'], [1, 2]], + names=['let', 'num'])) + df + df.rename_axis(index={'let': 'abc'}) + df.rename_axis(index=str.upper) .. _basics.iteration: diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index ba164a82c162e..ddf5fffb1d80b 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -180,6 +180,29 @@ array, but rather an ``ExtensionArray``: This is the same behavior as ``Series.values`` for categorical data. See :ref:`whatsnew_0240.api_breaking.interval_values` for more. +.. _whatsnew_0240.enhancements.rename_axis: + +Renaming names in a MultiIndex +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:func:`DataFrame.rename_axis` now supports ``index`` and ``columns`` arguments +and :func:`Series.rename_axis` supports ``index`` argument (:issue:`19978`) + +This change allows a dictionary to be passed so that some of the names +of a ``MultiIndex`` can be changed. + +Example: + +.. ipython:: python + + mi = pd.MultiIndex.from_product([list('AB'), list('CD'), list('EF')], + names=['AB', 'CD', 'EF']) + df = pd.DataFrame([i for i in range(len(mi))], index=mi, columns=['N']) + df + df.rename_axis(index={'CD': 'New'}) + +See the :ref:`advanced docs on renaming<advanced.index_names>` for more details. + .. 
_whatsnew_0240.enhancements.other: Other Enhancements diff --git a/pandas/core/common.py b/pandas/core/common.py index f6e40faa79740..0a82dd8636888 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -454,3 +454,21 @@ def _pipe(obj, func, *args, **kwargs): return func(*args, **kwargs) else: return func(obj, *args, **kwargs) + + +def _get_rename_function(mapper): + """ + Returns a function that will map names/labels, dependent if mapper + is a dict, Series or just a function. + """ + if isinstance(mapper, (compat.Mapping, ABCSeries)): + + def f(x): + if x in mapper: + return mapper[x] + else: + return x + else: + f = mapper + + return f diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 693cd14c8ca1d..db10494f0724d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -53,7 +53,8 @@ isidentifier, set_function_name, cPickle as pkl) from pandas.core.ops import _align_method_FRAME import pandas.core.nanops as nanops -from pandas.util._decorators import Appender, Substitution +from pandas.util._decorators import (Appender, Substitution, + rewrite_axis_style_signature) from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs from pandas.core import config @@ -1079,20 +1080,6 @@ def rename(self, *args, **kwargs): if com.count_not_none(*axes.values()) == 0: raise TypeError('must pass an index to rename') - # renamer function if passed a dict - def _get_rename_function(mapper): - if isinstance(mapper, (dict, ABCSeries)): - - def f(x): - if x in mapper: - return mapper[x] - else: - return x - else: - f = mapper - - return f - self._consolidate_inplace() result = self if inplace else self.copy(deep=copy) @@ -1101,7 +1088,7 @@ def f(x): v = axes.get(self._AXIS_NAMES[axis]) if v is None: continue - f = _get_rename_function(v) + f = com._get_rename_function(v) baxis = self._get_block_manager_axis(axis) if level is not None: @@ -1115,16 +1102,28 @@ def f(x): else: return result.__finalize__(self) - def 
rename_axis(self, mapper, axis=0, copy=True, inplace=False): + @rewrite_axis_style_signature('mapper', [('copy', True), + ('inplace', False)]) + def rename_axis(self, mapper=None, **kwargs): """ - Alter the name of the index or columns. + Alter the name of the index or name of Index object that is the + columns. Parameters ---------- mapper : scalar, list-like, optional - Value to set as the axis name attribute. - axis : {0 or 'index', 1 or 'columns'}, default 0 - The index or the name of the axis. + Value to set the axis name attribute. + index, columns : scalar, list-like, dict-like or function, optional + dict-like or functions transformations to apply to + that axis' values. + + Use either ``mapper`` and ``axis`` to + specify the axis to target with ``mapper``, or ``index`` + and/or ``columns``. + + .. versionchanged:: 0.24.0 + + axis : int or string, default 0 copy : boolean, default True Also copy underlying data. inplace : boolean, default False @@ -1143,6 +1142,23 @@ def rename_axis(self, mapper, axis=0, copy=True, inplace=False): deprecated and will be removed in a future version. Use ``rename`` instead. + ``DataFrame.rename_axis`` supports two calling conventions + + * ``(index=index_mapper, columns=columns_mapper, ...)`` + * ``(mapper, axis={'index', 'columns'}, ...)`` + + The first calling convention will only modify the names of + the index and/or the names of the Index object that is the columns. + In this case, the parameter ``copy`` is ignored. + + The second calling convention will modify the names of the + the corresponding index if mapper is a list or a scalar. + However, if mapper is dict-like or a function, it will use the + deprecated behavior of modifying the axis *labels*. + + We *highly* recommend using keyword arguments to clarify your + intent. 
+ See Also -------- pandas.Series.rename : Alter Series index labels or name @@ -1176,20 +1192,94 @@ def rename_axis(self, mapper, axis=0, copy=True, inplace=False): 0 1 4 1 2 5 2 3 6 - """ + + >>> mi = pd.MultiIndex.from_product([['a', 'b', 'c'], [1, 2]], + ... names=['let','num']) + >>> df = pd.DataFrame({'x': [i for i in range(len(mi))], + ... 'y' : [i*10 for i in range(len(mi))]}, + ... index=mi) + >>> df.rename_axis(index={'num' : 'n'}) + x y + let n + a 1 0 0 + 2 1 10 + b 1 2 20 + 2 3 30 + c 1 4 40 + 2 5 50 + + >>> cdf = df.rename_axis(columns='col') + >>> cdf + col x y + let num + a 1 0 0 + 2 1 10 + b 1 2 20 + 2 3 30 + c 1 4 40 + 2 5 50 + + >>> cdf.rename_axis(columns=str.upper) + COL x y + let num + a 1 0 0 + 2 1 10 + b 1 2 20 + 2 3 30 + c 1 4 40 + 2 5 50 + + """ + axes, kwargs = self._construct_axes_from_arguments((), kwargs) + copy = kwargs.pop('copy', True) + inplace = kwargs.pop('inplace', False) + axis = kwargs.pop('axis', 0) + if axis is not None: + axis = self._get_axis_number(axis) + + if kwargs: + raise TypeError('rename_axis() got an unexpected keyword ' + 'argument "{0}"'.format(list(kwargs.keys())[0])) + inplace = validate_bool_kwarg(inplace, 'inplace') - non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not - is_dict_like(mapper)) - if non_mapper: - return self._set_axis_name(mapper, axis=axis, inplace=inplace) + + if (mapper is not None): + # Use v0.23 behavior if a scalar or list + non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not + is_dict_like(mapper)) + if non_mapper: + return self._set_axis_name(mapper, axis=axis, inplace=inplace) + else: + # Deprecated (v0.21) behavior is if mapper is specified, + # and not a list or scalar, then call rename + msg = ("Using 'rename_axis' to alter labels is deprecated. 
" + "Use '.rename' instead") + warnings.warn(msg, FutureWarning, stacklevel=3) + axis = self._get_axis_name(axis) + d = {'copy': copy, 'inplace': inplace} + d[axis] = mapper + return self.rename(**d) else: - msg = ("Using 'rename_axis' to alter labels is deprecated. " - "Use '.rename' instead") - warnings.warn(msg, FutureWarning, stacklevel=2) - axis = self._get_axis_name(axis) - d = {'copy': copy, 'inplace': inplace} - d[axis] = mapper - return self.rename(**d) + # Use new behavior. Means that index and/or columns + # is specified + result = self if inplace else self.copy(deep=copy) + + for axis in lrange(self._AXIS_LEN): + v = axes.get(self._AXIS_NAMES[axis]) + if v is None: + continue + non_mapper = is_scalar(v) or (is_list_like(v) and not + is_dict_like(v)) + if non_mapper: + newnames = v + else: + f = com._get_rename_function(v) + curnames = self._get_axis(axis).names + newnames = [f(name) for name in curnames] + result._set_axis_name(newnames, axis=axis, + inplace=True) + if not inplace: + return result def _set_axis_name(self, name, axis=0, inplace=False): """ diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 0bf2d537f3698..0752c125b75eb 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -538,6 +538,52 @@ def test_rename_axis_warns(self): df['A'].rename_axis(id) assert 'rename' in str(w[0].message) + def test_rename_axis_mapper(self): + # GH 19978 + mi = MultiIndex.from_product([['a', 'b', 'c'], [1, 2]], + names=['ll', 'nn']) + df = DataFrame({'x': [i for i in range(len(mi))], + 'y': [i * 10 for i in range(len(mi))]}, + index=mi) + + # Test for rename of the Index object of columns + result = df.rename_axis('cols', axis=1) + tm.assert_index_equal(result.columns, + Index(['x', 'y'], name='cols')) + + # Test for rename of the Index object of columns using dict + result = result.rename_axis(columns={'cols': 'new'}, axis=1) + tm.assert_index_equal(result.columns, + 
Index(['x', 'y'], name='new')) + + # Test for renaming index using dict + result = df.rename_axis(index={'ll': 'foo'}) + assert result.index.names == ['foo', 'nn'] + + # Test for renaming index using a function + result = df.rename_axis(index=str.upper, axis=0) + assert result.index.names == ['LL', 'NN'] + + # Test for renaming index providing complete list + result = df.rename_axis(index=['foo', 'goo']) + assert result.index.names == ['foo', 'goo'] + + # Test for changing index and columns at same time + sdf = df.reset_index().set_index('nn').drop(columns=['ll', 'y']) + result = sdf.rename_axis(index='foo', columns='meh') + assert result.index.name == 'foo' + assert result.columns.name == 'meh' + + # Test different error cases + with tm.assert_raises_regex(TypeError, 'Must pass'): + df.rename_axis(index='wrong') + + with tm.assert_raises_regex(ValueError, 'Length of names'): + df.rename_axis(index=['wrong']) + + with tm.assert_raises_regex(TypeError, 'bogus'): + df.rename_axis(bogus=None) + def test_rename_multiindex(self): tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')] diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 9dfb493cb129c..337eb74b3b51a 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -1345,8 +1345,8 @@ def test_append_with_strings(self): with catch_warnings(record=True): simplefilter("ignore", FutureWarning) wp = tm.makePanel() - wp2 = wp.rename_axis( - {x: "%s_extra" % x for x in wp.minor_axis}, axis=2) + wp2 = wp.rename( + minor_axis={x: "%s_extra" % x for x in wp.minor_axis}) def check_col(key, name, size): assert getattr(store.get_storer(key) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index e65a2e9f9d4fa..23bf8896409c9 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1553,12 +1553,11 @@ def df(): panel1 = make_panel() panel2 = make_panel() - panel2 = panel2.rename_axis({x: "%s_1" 
% x - for x in panel2.major_axis}, - axis=1) + panel2 = panel2.rename(major_axis={x: "%s_1" % x + for x in panel2.major_axis}) - panel3 = panel2.rename_axis(lambda x: '%s_1' % x, axis=1) - panel3 = panel3.rename_axis(lambda x: '%s_1' % x, axis=2) + panel3 = panel2.rename(major_axis=lambda x: '%s_1' % x) + panel3 = panel3.rename(minor_axis=lambda x: '%s_1' % x) # it works! concat([panel1, panel3], axis=1, verify_integrity=True, sort=sort) diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index be3d0cd6d929c..00e145680c7a6 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -226,6 +226,24 @@ def test_reorder_levels(self): expected = Series(np.arange(6), index=e_idx) tm.assert_series_equal(result, expected) + def test_rename_axis_mapper(self): + # GH 19978 + mi = MultiIndex.from_product([['a', 'b', 'c'], [1, 2]], + names=['ll', 'nn']) + s = Series([i for i in range(len(mi))], index=mi) + + result = s.rename_axis(index={'ll': 'foo'}) + assert result.index.names == ['foo', 'nn'] + + result = s.rename_axis(index=str.upper, axis=0) + assert result.index.names == ['LL', 'NN'] + + result = s.rename_axis(index=['foo', 'goo']) + assert result.index.names == ['foo', 'goo'] + + with tm.assert_raises_regex(TypeError, 'unexpected'): + s.rename_axis(columns='wrong') + def test_rename_axis_inplace(self, datetime_series): # GH 15704 expected = datetime_series.rename_axis('foo') diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 51c779c6a97a3..775fcc2684f42 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -2110,16 +2110,16 @@ def test_repr_empty(self): def test_rename(self): mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'} - renamed = self.panel.rename_axis(mapper, axis=0) + renamed = self.panel.rename(items=mapper) exp = Index(['foo', 'bar', 'baz']) tm.assert_index_equal(renamed.items, exp) - renamed = 
self.panel.rename_axis(str.lower, axis=2) + renamed = self.panel.rename(minor_axis=str.lower) exp = Index(['a', 'b', 'c', 'd']) tm.assert_index_equal(renamed.minor_axis, exp) # don't copy - renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False) + renamed_nocopy = self.panel.rename(items=mapper, copy=False) renamed_nocopy['foo'] = 3. assert (self.panel['ItemA'].values == 3).all()
- [x] closes #19978 - [x] tests added / passed - `test_rename_axis_mapper` for `DataFrame` and `Series` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/20046
2018-03-07T23:21:13Z
2018-10-29T18:53:18Z
2018-10-29T18:53:17Z
2018-11-02T13:24:52Z
DOC/CI: temp pin matplotlib for doc build
diff --git a/ci/requirements-3.6_DOC.run b/ci/requirements-3.6_DOC.run index 084f38ce17eb2..fa9cab32c0ac2 100644 --- a/ci/requirements-3.6_DOC.run +++ b/ci/requirements-3.6_DOC.run @@ -5,7 +5,7 @@ sphinx nbconvert nbformat notebook -matplotlib +matplotlib=2.1* seaborn scipy lxml
xref https://github.com/pandas-dev/pandas/issues/20031
https://api.github.com/repos/pandas-dev/pandas/pulls/20045
2018-03-07T21:55:11Z
2018-03-08T10:14:06Z
2018-03-08T10:14:06Z
2018-03-08T10:14:17Z
DOC: Update Kurt Docstr
diff --git a/pandas/core/window.py b/pandas/core/window.py index cef012bb33e9f..c41b07759d555 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -906,21 +906,23 @@ def skew(self, **kwargs): Parameters ---------- - kwargs : Under Review + **kwargs + Under Review. Returns ------- - Series or DataFrame (matches input) - Like-indexed object containing the result of function application + Series or DataFrame + Returned object type is determined by the caller of the %(name)s + calculation See Also -------- - pandas.Series.%(name)s - pandas.DataFrame.%(name)s - pandas.Series.kurtosis - pandas.DataFrame.kurtosis - scipy.stats.skew - scipy.stats.kurtosis + Series.%(name)s : Calling object with Series data + DataFrame.%(name)s : Calling object with DataFrames + Series.kurt : Equivalent method for Series + DataFrame.kurt : Equivalent method for DataFrame + scipy.stats.skew : Third moment of a probability density + scipy.stats.kurtosis : Reference SciPy method Notes ----- @@ -932,19 +934,20 @@ def skew(self, **kwargs): four matching the equivalent function call using `scipy.stats`. >>> arr = [1, 2, 3, 4, 999] + >>> fmt = "{0:.6f}" # limit the printed precision to 6 digits >>> import scipy.stats - >>> print("{0:.6f}".format(scipy.stats.kurtosis(arr[:-1], bias=False))) + >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False))) -1.200000 - >>> print("{0:.6f}".format(scipy.stats.kurtosis(arr[1:], bias=False))) + >>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False))) 3.999946 - >>> df = pd.DataFrame(arr) - >>> df.rolling(4).kurt() - 0 - 0 NaN - 1 NaN - 2 NaN - 3 -1.200000 - 4 3.999946 + >>> s = pd.Series(arr) + >>> s.rolling(4).kurt() + 0 NaN + 1 NaN + 2 NaN + 3 -1.200000 + 4 3.999946 + dtype: float64 """) def kurt(self, **kwargs):
Continuation of #19999
https://api.github.com/repos/pandas-dev/pandas/pulls/20044
2018-03-07T21:49:17Z
2018-03-09T10:37:27Z
2018-03-09T10:37:27Z
2018-03-10T04:58:30Z
Refactored GroupBy ASVs
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 3e7e5c821b14c..7777322071957 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -14,7 +14,10 @@ method_blacklist = { 'object': {'median', 'prod', 'sem', 'cumsum', 'sum', 'cummin', 'mean', 'max', 'skew', 'cumprod', 'cummax', 'rank', 'pct_change', 'min', - 'var', 'mad', 'describe', 'std'} + 'var', 'mad', 'describe', 'std'}, + 'datetime': {'median', 'prod', 'sem', 'cumsum', 'sum', 'mean', 'skew', + 'cumprod', 'cummax', 'pct_change', 'var', 'mad', 'describe', + 'std'} } @@ -90,45 +93,6 @@ def time_series_groups(self, data, key): self.ser.groupby(self.ser).groups -class FirstLast(object): - - goal_time = 0.2 - - param_names = ['dtype'] - params = ['float32', 'float64', 'datetime', 'object'] - - def setup(self, dtype): - N = 10**5 - # with datetimes (GH7555) - if dtype == 'datetime': - self.df = DataFrame({'values': date_range('1/1/2011', - periods=N, - freq='s'), - 'key': range(N)}) - elif dtype == 'object': - self.df = DataFrame({'values': ['foo'] * N, - 'key': range(N)}) - else: - labels = np.arange(N / 10).repeat(10) - data = Series(np.random.randn(len(labels)), dtype=dtype) - data[::3] = np.nan - data[1::3] = np.nan - labels = labels.take(np.random.permutation(len(labels))) - self.df = DataFrame({'values': data, 'key': labels}) - - def time_groupby_first(self, dtype): - self.df.groupby('key').first() - - def time_groupby_last(self, dtype): - self.df.groupby('key').last() - - def time_groupby_nth_all(self, dtype): - self.df.groupby('key').nth(0, dropna='all') - - def time_groupby_nth_none(self, dtype): - self.df.groupby('key').nth(0) - - class GroupManyLabels(object): goal_time = 0.2 @@ -149,39 +113,40 @@ class Nth(object): goal_time = 0.2 - def setup_cache(self): - df = DataFrame(np.random.randint(1, 100, (10000, 2))) - df.iloc[1, 1] = np.nan - return df - - def time_frame_nth_any(self, df): - df.groupby(0).nth(0, dropna='any') - - def 
time_frame_nth(self, df): - df.groupby(0).nth(0) - + param_names = ['dtype'] + params = ['float32', 'float64', 'datetime', 'object'] - def time_series_nth_any(self, df): - df[1].groupby(df[0]).nth(0, dropna='any') + def setup(self, dtype): + N = 10**5 + # with datetimes (GH7555) + if dtype == 'datetime': + values = date_range('1/1/2011', periods=N, freq='s') + elif dtype == 'object': + values = ['foo'] * N + else: + values = np.arange(N).astype(dtype) - def time_series_nth(self, df): - df[1].groupby(df[0]).nth(0) + key = np.arange(N) + self.df = DataFrame({'key': key, 'values': values}) + self.df.iloc[1, 1] = np.nan # insert missing data + def time_frame_nth_any(self, dtype): + self.df.groupby('key').nth(0, dropna='any') -class NthObject(object): + def time_groupby_nth_all(self, dtype): + self.df.groupby('key').nth(0, dropna='all') - goal_time = 0.2 + def time_frame_nth(self, dtype): + self.df.groupby('key').nth(0) - def setup_cache(self): - df = DataFrame(np.random.randint(1, 100, (10000,)), columns=['g']) - df['obj'] = ['a'] * 5000 + ['b'] * 5000 - return df + def time_series_nth_any(self, dtype): + self.df['values'].groupby(self.df['key']).nth(0, dropna='any') - def time_nth(self, df): - df.groupby('g').nth(5) + def time_groupby_nth_all(self, dtype): + self.df['values'].groupby(self.df['key']).nth(0, dropna='all') - def time_nth_last(self, df): - df.groupby('g').last() + def time_series_nth(self, dtype): + self.df['values'].groupby(self.df['key']).nth(0) class DateAttributes(object): @@ -243,7 +208,7 @@ def time_multi_count(self, df): df.groupby(['key1', 'key2']).count() -class CountInt(object): +class CountMultiInt(object): goal_time = 0.2 @@ -255,10 +220,10 @@ def setup_cache(self): 'ints2': np.random.randint(0, 1000, size=n)}) return df - def time_int_count(self, df): + def time_multi_int_count(self, df): df.groupby(['key1', 'key2']).count() - def time_int_nunique(self, df): + def time_multi_int_nunique(self, df): df.groupby(['key1', 'key2']).nunique() @@ 
-266,7 +231,7 @@ class AggFunctions(object): goal_time = 0.2 - def setup_cache(self): + def setup_cache(): N = 10**5 fac1 = np.array(['A', 'B', 'C'], dtype='O') fac2 = np.array(['one', 'two'], dtype='O') @@ -361,9 +326,6 @@ def setup(self): def time_multi_size(self): self.df.groupby(['key1', 'key2']).size() - def time_dt_size(self): - self.df.groupby(['dates']).size() - def time_dt_timegrouper_size(self): with warnings.catch_warnings(record=True): self.df.groupby(TimeGrouper(key='dates', freq='M')).size() @@ -376,15 +338,16 @@ class GroupByMethods(object): goal_time = 0.2 - param_names = ['dtype', 'method'] - params = [['int', 'float', 'object'], + param_names = ['dtype', 'method', 'application'] + params = [['int', 'float', 'object', 'datetime'], ['all', 'any', 'bfill', 'count', 'cumcount', 'cummax', 'cummin', 'cumprod', 'cumsum', 'describe', 'ffill', 'first', 'head', 'last', 'mad', 'max', 'min', 'median', 'mean', 'nunique', 'pct_change', 'prod', 'rank', 'sem', 'shift', 'size', 'skew', - 'std', 'sum', 'tail', 'unique', 'value_counts', 'var']] + 'std', 'sum', 'tail', 'unique', 'value_counts', 'var'], + ['direct', 'transformation']] - def setup(self, dtype, method): + def setup(self, dtype, method, application): if method in method_blacklist.get(dtype, {}): raise NotImplementedError # skip benchmark ngroups = 1000 @@ -398,12 +361,28 @@ def setup(self, dtype, method): np.random.random(ngroups) * 10.0]) elif dtype == 'object': key = ['foo'] * size + elif dtype == 'datetime': + key = date_range('1/1/2011', periods=size, freq='s') df = DataFrame({'values': values, 'key': key}) - self.df_groupby_method = getattr(df.groupby('key')['values'], method) - def time_method(self, dtype, method): - self.df_groupby_method() + if application == 'transform': + if method == 'describe': + raise NotImplementedError + + self.as_group_method = lambda: df.groupby( + 'key')['values'].transform(method) + self.as_field_method = lambda: df.groupby( + 'values')['key'].transform(method) + else: 
+ self.as_group_method = getattr(df.groupby('key')['values'], method) + self.as_field_method = getattr(df.groupby('values')['key'], method) + + def time_dtype_as_group(self, dtype, method, application): + self.as_group_method() + + def time_dtype_as_field(self, dtype, method, application): + self.as_field_method() class Float32(object):
- [X] closes #19733 - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` I ended up mixing in some cleanup as part of this change
https://api.github.com/repos/pandas-dev/pandas/pulls/20043
2018-03-07T20:39:32Z
2018-03-10T02:03:51Z
2018-03-10T02:03:51Z
2018-05-14T21:11:40Z
DOC: Improve pandas.Series.plot.kde docstring and kwargs rewording for whole file
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 98fdcf8f94ae0..520c6cecce6d7 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2532,7 +2532,8 @@ def line(self, **kwds): Parameters ---------- `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + Additional keyword arguments are documented in + :meth:`pandas.Series.plot`. Returns ------- @@ -2556,7 +2557,8 @@ def bar(self, **kwds): Parameters ---------- `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + Additional keyword arguments are documented in + :meth:`pandas.Series.plot`. Returns ------- @@ -2571,7 +2573,8 @@ def barh(self, **kwds): Parameters ---------- `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + Additional keyword arguments are documented in + :meth:`pandas.Series.plot`. Returns ------- @@ -2586,7 +2589,8 @@ def box(self, **kwds): Parameters ---------- `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + Additional keyword arguments are documented in + :meth:`pandas.Series.plot`. Returns ------- @@ -2603,7 +2607,8 @@ def hist(self, bins=10, **kwds): bins: integer, default 10 Number of histogram bins to be used `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + Additional keyword arguments are documented in + :meth:`pandas.Series.plot`. Returns ------- @@ -2613,26 +2618,74 @@ def hist(self, bins=10, **kwds): def kde(self, bw_method=None, ind=None, **kwds): """ - Kernel Density Estimate plot + Kernel Density Estimate plot using Gaussian kernels. + + In statistics, kernel density estimation (KDE) is a non-parametric way + to estimate the probability density function (PDF) of a random + variable. This function uses Gaussian kernels and includes automatic + bandwith determination. 
Parameters ---------- - bw_method: str, scalar or callable, optional - The method used to calculate the estimator bandwidth. This can be + bw_method : str, scalar or callable, optional + The method used to calculate the estimator bandwidth. This can be 'scott', 'silverman', a scalar constant or a callable. If None (default), 'scott' is used. See :class:`scipy.stats.gaussian_kde` for more information. ind : NumPy array or integer, optional - Evaluation points. If None (default), 1000 equally spaced points - are used. If `ind` is a NumPy array, the kde is evaluated at the - points passed. If `ind` is an integer, `ind` number of equally - spaced points are used. - `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + Evaluation points for the estimated PDF. If None (default), + 1000 equally spaced points are used. If `ind` is a NumPy array, the + kde is evaluated at the points passed. If `ind` is an integer, + `ind` number of equally spaced points are used. + kwds : optional + Additional keyword arguments are documented in + :meth:`pandas.Series.plot`. Returns ------- axes : matplotlib.AxesSubplot or np.array of them + + See also + -------- + scipy.stats.gaussian_kde : Representation of a kernel-density + estimate using Gaussian kernels. This is the function used + internally to estimate the PDF. + + Examples + -------- + Given a Series of points randomly sampled from an unknown + distribution, estimate this distribution using KDE with automatic + bandwidth determination and plot the results, evaluating them at + 1000 equally spaced points (default): + + .. plot:: + :context: close-figs + + >>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5]) + >>> ax = s.plot.kde() + + + An scalar fixed bandwidth can be specified. Using a too small bandwidth + can lead to overfitting, while a too large bandwidth can result in + underfitting: + + .. plot:: + :context: close-figs + + >>> ax = s.plot.kde(bw_method=0.3) + + .. 
plot:: + :context: close-figs + + >>> ax = s.plot.kde(bw_method=3) + + Finally, the `ind` parameter determines the evaluation points for the + plot of the estimated PDF: + + .. plot:: + :context: close-figs + + >>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5]) """ return self(kind='kde', bw_method=bw_method, ind=ind, **kwds) @@ -2645,7 +2698,8 @@ def area(self, **kwds): Parameters ---------- `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + Additional keyword arguments are documented in + :meth:`pandas.Series.plot`. Returns ------- @@ -2660,7 +2714,8 @@ def pie(self, **kwds): Parameters ---------- `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + Additional keyword arguments are documented in + :meth:`pandas.Series.plot`. Returns ------- @@ -2711,7 +2766,8 @@ def line(self, x=None, y=None, **kwds): x, y : label or position, optional Coordinates for each point. `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + Additional keyword arguments are documented in + :meth:`pandas.DataFrame.plot`. Returns ------- @@ -2728,7 +2784,8 @@ def bar(self, x=None, y=None, **kwds): x, y : label or position, optional Coordinates for each point. `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + Additional keyword arguments are documented in + :meth:`pandas.DataFrame.plot`. Returns ------- @@ -2745,7 +2802,8 @@ def barh(self, x=None, y=None, **kwds): x, y : label or position, optional Coordinates for each point. `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + Additional keyword arguments are documented in + :meth:`pandas.DataFrame.plot`. Returns ------- @@ -2762,7 +2820,8 @@ def box(self, by=None, **kwds): by : string or sequence Column in the DataFrame to group by. `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. 
+ Additional keyword arguments are documented in + :meth:`pandas.DataFrame.plot`. Returns ------- @@ -2781,7 +2840,8 @@ def hist(self, by=None, bins=10, **kwds): bins: integer, default 10 Number of histogram bins to be used `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + Additional keyword arguments are documented in + :meth:`pandas.DataFrame.plot`. Returns ------- @@ -2806,7 +2866,8 @@ def kde(self, bw_method=None, ind=None, **kwds): points passed. If `ind` is an integer, `ind` number of equally spaced points are used. `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + Additional keyword arguments are documented in + :meth:`pandas.DataFrame.plot`. Returns ------- @@ -2825,7 +2886,8 @@ def area(self, x=None, y=None, **kwds): x, y : label or position, optional Coordinates for each point. `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + Additional keyword arguments are documented in + :meth:`pandas.DataFrame.plot`. Returns ------- @@ -2842,7 +2904,8 @@ def pie(self, y=None, **kwds): y : label or position, optional Column to plot. `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + Additional keyword arguments are documented in + :meth:`pandas.DataFrame.plot`. Returns ------- @@ -2863,7 +2926,8 @@ def scatter(self, x, y, s=None, c=None, **kwds): c : label or position, optional Color of each point. `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + Additional keyword arguments are documented in + :meth:`pandas.DataFrame.plot`. Returns ------- @@ -2888,7 +2952,8 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, gridsize : int, optional Number of bins. `**kwds` : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + Additional keyword arguments are documented in + :meth:`pandas.DataFrame.plot`. Returns -------
Initial docstring assigned to the Barcelona Chapter for the documentation sprint. Credits to @arnau126 who also contributed to this change. Also, rewording of `kwargs` argument explanation for the whole file, where we refer to the `pandas.Series.plot` docstring for info.
https://api.github.com/repos/pandas-dev/pandas/pulls/20041
2018-03-07T17:38:12Z
2018-03-10T21:33:42Z
2018-03-10T21:33:42Z
2018-03-11T09:30:13Z
TYP: Mypy workaround for NoDefault
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index ad77e9e533b0b..d4a766f7086af 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -23,9 +23,11 @@ ndarray_obj_2d = np.ndarray from enum import Enum -class NoDefault(Enum): ... +class _NoDefault(Enum): + no_default = ... -no_default: NoDefault +no_default = _NoDefault.no_default +NoDefault = Literal[_NoDefault.no_default] i8max: int u8max: int diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 13bd95004445d..4e245d1bd8693 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1,6 +1,7 @@ from collections import abc from decimal import Decimal from enum import Enum +from typing import Literal import warnings cimport cython @@ -2791,7 +2792,7 @@ cdef _infer_all_nats(dtype, ndarray datetimes, ndarray timedeltas): return result -class NoDefault(Enum): +class _NoDefault(Enum): # We make this an Enum # 1) because it round-trips through pickle correctly (see GH#40397) # 2) because mypy does not understand singletons @@ -2802,7 +2803,8 @@ class NoDefault(Enum): # Note: no_default is exported to the public API in pandas.api.extensions -no_default = NoDefault.no_default # Sentinel indicating the default value. +no_default = _NoDefault.no_default # Sentinel indicating the default value. 
+NoDefault = Literal[_NoDefault.no_default] @cython.boundscheck(False) diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index ab42fcd92a3d9..efbe9995525d7 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -112,12 +112,7 @@ def assert_almost_equal( FutureWarning, stacklevel=find_stack_level(), ) - # https://github.com/python/mypy/issues/7642 - # error: Argument 1 to "_get_tol_from_less_precise" has incompatible - # type "Union[bool, int, NoDefault]"; expected "Union[bool, int]" - rtol = atol = _get_tol_from_less_precise( - check_less_precise # type: ignore[arg-type] - ) + rtol = atol = _get_tol_from_less_precise(check_less_precise) if isinstance(left, Index): assert_index_equal( @@ -345,12 +340,7 @@ def _get_ilevel_values(index, level): FutureWarning, stacklevel=find_stack_level(), ) - # https://github.com/python/mypy/issues/7642 - # error: Argument 1 to "_get_tol_from_less_precise" has incompatible - # type "Union[bool, int, NoDefault]"; expected "Union[bool, int]" - rtol = atol = _get_tol_from_less_precise( - check_less_precise # type: ignore[arg-type] - ) + rtol = atol = _get_tol_from_less_precise(check_less_precise) # instance validation _check_isinstance(left, right, Index) diff --git a/pandas/_typing.py b/pandas/_typing.py index e71859e91785e..a85820a403fde 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -313,7 +313,8 @@ def closed(self) -> bool: XMLParsers = Literal["lxml", "etree"] # Interval closed type -IntervalClosedType = Literal["left", "right", "both", "neither"] +IntervalLeftRight = Literal["left", "right"] +IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]] # datetime and NaTType DatetimeNaTType = Union[datetime, "NaTType"] diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 679feaca71024..4c81fe8b61a1f 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -263,7 +263,7 @@ def _simple_new( cls: 
type[IntervalArrayT], left, right, - closed=None, + closed: IntervalClosedType | None = None, copy: bool = False, dtype: Dtype | None = None, verify_integrity: bool = True, @@ -416,7 +416,7 @@ def _from_factorized( def from_breaks( cls: type[IntervalArrayT], breaks, - closed="right", + closed: IntervalClosedType | None = "right", copy: bool = False, dtype: Dtype | None = None, ) -> IntervalArrayT: @@ -492,7 +492,7 @@ def from_arrays( cls: type[IntervalArrayT], left, right, - closed="right", + closed: IntervalClosedType | None = "right", copy: bool = False, dtype: Dtype | None = None, ) -> IntervalArrayT: @@ -956,10 +956,10 @@ def _concat_same_type( ------- IntervalArray """ - closed = {interval.closed for interval in to_concat} - if len(closed) != 1: + closed_set = {interval.closed for interval in to_concat} + if len(closed_set) != 1: raise ValueError("Intervals must all be closed on the same side.") - closed = closed.pop() + closed = closed_set.pop() left = np.concatenate([interval.left for interval in to_concat]) right = np.concatenate([interval.right for interval in to_concat]) @@ -1328,7 +1328,7 @@ def overlaps(self, other): # --------------------------------------------------------------------- @property - def closed(self): + def closed(self) -> IntervalClosedType: """ Whether the intervals are closed on the left-side, right-side, both or neither. 
diff --git a/pandas/core/common.py b/pandas/core/common.py index 2e8d6dbced4e3..eeb18759fc72c 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -673,7 +673,7 @@ def resolve_numeric_only(numeric_only: bool | None | lib.NoDefault) -> bool: # first default to None result = False else: - result = cast(bool, numeric_only) + result = numeric_only return result diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d563aa8b06ca5..e4de71067665a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6279,8 +6279,7 @@ def dropna( # faster equivalent to 'agg_obj.count(agg_axis) > 0' mask = notna(agg_obj).any(axis=agg_axis, bool_only=False) else: - if how is not no_default: - raise ValueError(f"invalid how option: {how}") + raise ValueError(f"invalid how option: {how}") if np.all(mask): result = self.copy() @@ -8050,9 +8049,6 @@ def groupby( raise TypeError("You have to supply one of 'by' and 'level'") axis = self._get_axis_number(axis) - # https://github.com/python/mypy/issues/7642 - # error: Argument "squeeze" to "DataFrameGroupBy" has incompatible type - # "Union[bool, NoDefault]"; expected "bool" return DataFrameGroupBy( obj=self, keys=by, @@ -8061,7 +8057,7 @@ def groupby( as_index=as_index, sort=sort, group_keys=group_keys, - squeeze=squeeze, # type: ignore[arg-type] + squeeze=squeeze, observed=observed, dropna=dropna, ) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1f524d07bfd3a..9feb6c0512302 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7898,8 +7898,8 @@ def between_time( FutureWarning, stacklevel=find_stack_level(), ) - left = True if isinstance(include_start, lib.NoDefault) else include_start - right = True if isinstance(include_end, lib.NoDefault) else include_end + left = True if include_start is lib.no_default else include_start + right = True if include_end is lib.no_default else include_end inc_dict: dict[tuple[bool_t, bool_t], IntervalClosedType] = { (True, True): "both", @@ -10689,7 
+10689,6 @@ def _stat_function( if axis is None: axis = self._stat_axis_number - axis = cast(Axis, axis) if level is not None: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 2acf5c826eb57..090554f2eafe5 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1580,7 +1580,7 @@ def idxmax( # DataFrame.idxmax for backwards compatibility numeric_only_arg = None if axis == 0 else False else: - numeric_only_arg = cast(bool, numeric_only) + numeric_only_arg = numeric_only def func(df): res = df._reduce( @@ -1616,7 +1616,7 @@ def idxmin( # DataFrame.idxmin for backwards compatibility numeric_only_arg = None if axis == 0 else False else: - numeric_only_arg = cast(bool, numeric_only) + numeric_only_arg = numeric_only def func(df): res = df._reduce( diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 6cee6c1913f74..ffbee0bf21a66 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1277,9 +1277,7 @@ def _resolve_numeric_only( else: numeric_only = False - # error: Incompatible return value type (got "Union[bool, NoDefault]", - # expected "bool") - return numeric_only # type: ignore[return-value] + return numeric_only def _maybe_warn_numeric_only_depr( self, how: str, result: DataFrame | Series, numeric_only: bool | lib.NoDefault diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 806d081c0176b..e7b810dacdf57 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -34,6 +34,7 @@ Dtype, DtypeObj, IntervalClosedType, + IntervalLeftRight, npt, ) from pandas.util._decorators import ( @@ -1039,12 +1040,12 @@ def date_range( DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq='D') """ - if inclusive is not None and not isinstance(closed, lib.NoDefault): + if inclusive is not 
None and closed is not lib.no_default: raise ValueError( "Deprecated argument `closed` cannot be passed" "if argument `inclusive` is not None" ) - elif not isinstance(closed, lib.NoDefault): + elif closed is not lib.no_default: warnings.warn( "Argument `closed` is deprecated in favor of `inclusive`.", FutureWarning, @@ -1087,7 +1088,7 @@ def bdate_range( name: Hashable = None, weekmask=None, holidays=None, - closed: lib.NoDefault = lib.no_default, + closed: IntervalLeftRight | lib.NoDefault | None = lib.no_default, inclusive: IntervalClosedType | None = None, **kwargs, ) -> DatetimeIndex: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index c3acfc5ff2f66..a89b52e0950f2 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -251,7 +251,7 @@ def __new__( def from_breaks( cls, breaks, - closed: str = "right", + closed: IntervalClosedType | None = "right", name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, @@ -282,7 +282,7 @@ def from_arrays( cls, left, right, - closed: str = "right", + closed: IntervalClosedType = "right", name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, @@ -957,7 +957,7 @@ def interval_range( periods=None, freq=None, name: Hashable = None, - closed: lib.NoDefault = lib.no_default, + closed: IntervalClosedType | lib.NoDefault = lib.no_default, inclusive: IntervalClosedType | None = None, ) -> IntervalIndex: """ @@ -1054,12 +1054,12 @@ def interval_range( IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], dtype='interval[int64, both]') """ - if inclusive is not None and not isinstance(closed, lib.NoDefault): + if inclusive is not None and closed is not lib.no_default: raise ValueError( "Deprecated argument `closed` cannot be passed " "if argument `inclusive` is not None" ) - elif not isinstance(closed, lib.NoDefault): + elif closed is not lib.no_default: warnings.warn( "Argument `closed` is deprecated in favor of `inclusive`.", FutureWarning, diff 
--git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 29df930c5aaf3..1608260205dd1 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -10,6 +10,7 @@ Hashable, Iterable, List, + Literal, Sequence, Tuple, cast, @@ -1397,7 +1398,7 @@ def format( sparsify = get_option("display.multi_sparse") if sparsify: - sentinel = "" + sentinel: Literal[""] | bool | lib.NoDefault = "" # GH3547 use value of sparsify as sentinel if it's "Falsey" assert isinstance(sparsify, bool) or sparsify is lib.no_default if sparsify in [False, lib.no_default]: diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index d8c4f3f3da765..94705790e40bd 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -16,6 +16,7 @@ Timestamp, ) from pandas._libs.lib import infer_dtype +from pandas._typing import IntervalLeftRight from pandas.core.dtypes.common import ( DT64NS_DTYPE, @@ -560,7 +561,7 @@ def _format_labels( bins, precision: int, right: bool = True, include_lowest: bool = False, dtype=None ): """based on the dtype, return our labels""" - closed = "right" if right else "left" + closed: IntervalLeftRight = "right" if right else "left" formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta] diff --git a/pandas/core/series.py b/pandas/core/series.py index b740bac78b263..d1514a3872800 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1952,8 +1952,6 @@ def groupby( raise TypeError("You have to supply one of 'by' and 'level'") axis = self._get_axis_number(axis) - # error: Argument "squeeze" to "SeriesGroupBy" has incompatible type - # "Union[bool, NoDefault]"; expected "bool" return SeriesGroupBy( obj=self, keys=by, @@ -1962,7 +1960,7 @@ def groupby( as_index=as_index, sort=sort, group_keys=group_keys, - squeeze=squeeze, # type: ignore[arg-type] + squeeze=squeeze, observed=observed, dropna=dropna, )
This uses the mypy-workaround from https://github.com/python/typeshed/pull/7127/files for `NoDefault` (works also for pyright). `no_default` is public but `NoDefault` is luckily not public, so it should be fine to do the following: - rename the existing `NoDefault` to `_NoDefault` (implementation and typing) - define `NoDefault` as `NoDefault = Literal[_NoDefault.no_default]` (implementation and typing; `isinstance` checks need to be re-written to use `is no_default` but all other type annotations can stay the same) - `no_default` "remains" `no_default = _NoDefault.no_default` (just replacing `NoDefault` with `_NoDefault`) @simonjayhawkins @jreback @Dr-Irv edit: Some context: The following will work without mypy errors afterwards: ```py def foo(x: NoDefault | int) -> int: if x is no_default: return -1 return x + 1 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/47045
2022-05-17T22:35:53Z
2022-05-25T21:53:55Z
2022-05-25T21:53:55Z
2022-05-26T01:58:47Z
ENH: DatetimeArray fields support non-nano
diff --git a/pandas/_libs/tslibs/fields.pyi b/pandas/_libs/tslibs/fields.pyi index e404eadf13657..b1d9e0342f81e 100644 --- a/pandas/_libs/tslibs/fields.pyi +++ b/pandas/_libs/tslibs/fields.pyi @@ -22,6 +22,7 @@ def get_start_end_field( def get_date_field( dtindex: npt.NDArray[np.int64], # const int64_t[:] field: str, + reso: int = ..., # NPY_DATETIMEUNIT ) -> npt.NDArray[np.int32]: ... def get_timedelta_field( tdindex: npt.NDArray[np.int64], # const int64_t[:] @@ -32,6 +33,7 @@ def isleapyear_arr( ) -> npt.NDArray[np.bool_]: ... def build_isocalendar_sarray( dtindex: npt.NDArray[np.int64], # const int64_t[:] + reso: int = ..., # NPY_DATETIMEUNIT ) -> np.ndarray: ... def _get_locale_names(name_type: str, locale: str | None = ...): ... diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 57d4c27b3337d..5865b8c6877b0 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -329,7 +329,7 @@ def get_start_end_field( @cython.wraparound(False) @cython.boundscheck(False) -def get_date_field(const int64_t[:] dtindex, str field): +def get_date_field(const int64_t[:] dtindex, str field, NPY_DATETIMEUNIT reso=NPY_FR_ns): """ Given a int64-based datetime index, extract the year, month, etc., field and return an array of these values. 
@@ -348,7 +348,7 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = dts.year return out @@ -359,7 +359,7 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = dts.month return out @@ -370,7 +370,7 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = dts.day return out @@ -381,8 +381,9 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = dts.hour + # TODO: can we de-dup with period.pyx <accessor>s? return out elif field == 'm': @@ -392,7 +393,7 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = dts.min return out @@ -403,7 +404,7 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = dts.sec return out @@ -414,7 +415,7 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = dts.us return out @@ -425,7 +426,7 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = dts.ps // 1000 return out elif field == 'doy': @@ -435,7 +436,7 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - 
dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = get_day_of_year(dts.year, dts.month, dts.day) return out @@ -446,7 +447,7 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = dayofweek(dts.year, dts.month, dts.day) return out @@ -457,7 +458,7 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = get_week_of_year(dts.year, dts.month, dts.day) return out @@ -468,7 +469,7 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = dts.month out[i] = ((out[i] - 1) // 3) + 1 return out @@ -480,11 +481,11 @@ def get_date_field(const int64_t[:] dtindex, str field): out[i] = -1 continue - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) out[i] = get_days_in_month(dts.year, dts.month) return out elif field == 'is_leap_year': - return isleapyear_arr(get_date_field(dtindex, 'Y')) + return isleapyear_arr(get_date_field(dtindex, 'Y', reso=reso)) raise ValueError(f"Field {field} not supported") @@ -564,7 +565,7 @@ cpdef isleapyear_arr(ndarray years): @cython.wraparound(False) @cython.boundscheck(False) -def build_isocalendar_sarray(const int64_t[:] dtindex): +def build_isocalendar_sarray(const int64_t[:] dtindex, NPY_DATETIMEUNIT reso=NPY_FR_ns): """ Given a int64-based datetime array, return the ISO 8601 year, week, and day as a structured array. 
@@ -592,7 +593,7 @@ def build_isocalendar_sarray(const int64_t[:] dtindex): if dtindex[i] == NPY_NAT: ret_val = 0, 0, 0 else: - dt64_to_dtstruct(dtindex[i], &dts) + pandas_datetime_to_datetimestruct(dtindex[i], reso, &dts) ret_val = get_iso_calendar(dts.year, dts.month, dts.day) iso_years[i] = ret_val[0] diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 6f984727f4f6d..dadfad394b903 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -136,7 +136,7 @@ def f(self): values, field, self.freqstr, month_kw, reso=self._reso ) else: - result = fields.get_date_field(values, field) + result = fields.get_date_field(values, field, reso=self._reso) # these return a boolean by-definition return result @@ -146,7 +146,7 @@ def f(self): result = self._maybe_mask_results(result, fill_value=None) else: - result = fields.get_date_field(values, field) + result = fields.get_date_field(values, field, reso=self._reso) result = self._maybe_mask_results( result, fill_value=None, convert="float64" ) @@ -1403,7 +1403,7 @@ def isocalendar(self) -> DataFrame: from pandas import DataFrame values = self._local_timestamps() - sarray = fields.build_isocalendar_sarray(values) + sarray = fields.build_isocalendar_sarray(values, reso=self._reso) iso_calendar_df = DataFrame( sarray, columns=["year", "week", "day"], dtype="UInt32" ) diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 8eb5cc2dd82f6..897528cf18122 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -12,7 +12,15 @@ class TestNonNano: - @pytest.mark.parametrize("unit,reso", [("s", 7), ("ms", 8), ("us", 9)]) + @pytest.fixture(params=["s", "ms", "us"]) + def unit(self, request): + return request.param + + @pytest.fixture + def reso(self, unit): + # TODO: avoid hard-coding + return {"s": 7, "ms": 8, "us": 9}[unit] + @pytest.mark.xfail(reason="_box_func is not yet patched to get reso 
right") def test_non_nano(self, unit, reso): arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]") @@ -21,6 +29,22 @@ def test_non_nano(self, unit, reso): assert dta.dtype == arr.dtype assert dta[0]._reso == reso + @pytest.mark.filterwarnings( + "ignore:weekofyear and week have been deprecated:FutureWarning" + ) + @pytest.mark.parametrize( + "field", DatetimeArray._field_ops + DatetimeArray._bool_ops + ) + def test_fields(self, unit, reso, field): + dti = pd.date_range("2016-01-01", periods=55, freq="D") + arr = np.asarray(dti).astype(f"M8[{unit}]") + + dta = DatetimeArray._simple_new(arr, dtype=arr.dtype) + + res = getattr(dta, field) + expected = getattr(dti._data, field) + tm.assert_numpy_array_equal(res, expected) + class TestDatetimeArrayComparisons: # TODO: merge this into tests/arithmetic/test_datetime64 once it is
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47044
2022-05-17T22:13:21Z
2022-05-18T01:01:08Z
2022-05-18T01:01:08Z
2022-05-18T01:05:33Z
Backport PR #46394 on branch 1.4.x (CI: Use conda-forge PyPy)
diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml index b86dcea59edb8..35c40f2a4aa54 100644 --- a/.github/workflows/posix.yml +++ b/.github/workflows/posix.yml @@ -155,24 +155,11 @@ jobs: channel-priority: flexible environment-file: ${{ env.ENV_FILE }} use-only-tar-bz2: true - if: ${{ env.IS_PYPY == 'false' }} # No pypy3.8 support - name: Upgrade Arrow version run: conda install -n pandas-dev -c conda-forge --no-update-deps pyarrow=${{ matrix.pyarrow_version }} if: ${{ matrix.pyarrow_version }} - - name: Setup PyPy - uses: actions/setup-python@v3 - with: - python-version: "pypy-3.8" - if: ${{ env.IS_PYPY == 'true' }} - - - name: Setup PyPy dependencies - run: | - # TODO: re-enable cov, its slowing the tests down though - pip install Cython numpy python-dateutil pytz pytest>=6.0 pytest-xdist>=1.31.0 pytest-asyncio>=0.17 hypothesis>=5.5.3 - if: ${{ env.IS_PYPY == 'true' }} - - name: Build Pandas uses: ./.github/actions/build_pandas diff --git a/ci/deps/actions-pypy-38.yaml b/ci/deps/actions-pypy-38.yaml index ad05d2ab2dacc..eda35ee14ec65 100644 --- a/ci/deps/actions-pypy-38.yaml +++ b/ci/deps/actions-pypy-38.yaml @@ -11,6 +11,7 @@ dependencies: - cython>=0.29.24 - pytest>=6.0 - pytest-cov + - pytest-asyncio - pytest-xdist>=1.31 - hypothesis>=5.5.3
Backport PR #46394: CI: Use conda-forge PyPy
https://api.github.com/repos/pandas-dev/pandas/pulls/47040
2022-05-17T09:17:38Z
2022-05-17T11:06:54Z
2022-05-17T11:06:54Z
2022-05-17T11:06:54Z
REF: share parts of DTI and PI
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 25b7a5c3d3689..811dc72e9b908 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -210,8 +210,12 @@ def _summary(self, name=None) -> str: # -------------------------------------------------------------------- # Indexing Methods + @final def _can_partial_date_slice(self, reso: Resolution) -> bool: - raise NotImplementedError + # e.g. test_getitem_setitem_periodindex + # History of conversation GH#3452, GH#3931, GH#2369, GH#14826 + return reso > self._resolution_obj + # NB: for DTI/PI, not TDI def _parsed_string_to_bounds(self, reso: Resolution, parsed): raise NotImplementedError diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 3954cb28c2aca..5274f68eb3171 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -593,10 +593,6 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): end = self._maybe_cast_for_get_loc(end) return start, end - def _can_partial_date_slice(self, reso: Resolution) -> bool: - # History of conversation GH#3452, GH#3931, GH#2369, GH#14826 - return reso > self._resolution_obj - def _deprecate_mismatched_indexing(self, key) -> None: # GH#36148 # we get here with isinstance(key, self._data._recognized_scalars) @@ -651,12 +647,8 @@ def get_loc(self, key, method=None, tolerance=None): except KeyError as err: if method is None: raise KeyError(key) from err - try: - key = self._maybe_cast_for_get_loc(key) - except ValueError as err: - # FIXME(dateutil#1180): we get here because parse_with_reso - # doesn't raise on "t2m" - raise KeyError(key) from err + + key = self._maybe_cast_for_get_loc(key) elif isinstance(key, timedelta): # GH#20464 @@ -682,7 +674,16 @@ def get_loc(self, key, method=None, tolerance=None): def _maybe_cast_for_get_loc(self, key) -> Timestamp: # needed to localize naive datetimes or dates (GH 35690) - key = 
Timestamp(key) + try: + key = Timestamp(key) + except ValueError as err: + # FIXME(dateutil#1180): we get here because parse_with_reso + # doesn't raise on "t2m" + if not isinstance(key, str): + # Not expected to be reached, but check to be sure + raise # pragma: no cover + raise KeyError(key) from err + if key.tzinfo is None: key = key.tz_localize(self.tz) else: @@ -691,6 +692,13 @@ def _maybe_cast_for_get_loc(self, key) -> Timestamp: @doc(DatetimeTimedeltaMixin._maybe_cast_slice_bound) def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): + + # GH#42855 handle date here instead of get_slice_bound + if isinstance(label, date) and not isinstance(label, datetime): + # Pandas supports slicing with dates, treated as datetimes at midnight. + # https://github.com/pandas-dev/pandas/issues/31501 + label = Timestamp(label).to_pydatetime() + label = super()._maybe_cast_slice_bound(label, side, kind=kind) self._deprecate_mismatched_indexing(label) return self._maybe_cast_for_get_loc(label) @@ -722,13 +730,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=lib.no_default): if isinstance(start, time) or isinstance(end, time): raise KeyError("Cannot mix time and non-time slice keys") - # Pandas supports slicing with dates, treated as datetimes at midnight. 
- # https://github.com/pandas-dev/pandas/issues/31501 - if isinstance(start, date) and not isinstance(start, datetime): - start = datetime.combine(start, time(0, 0)) - if isinstance(end, date) and not isinstance(end, datetime): - end = datetime.combine(end, time(0, 0)) - def check_str_or_none(point): return point is not None and not isinstance(point, str) @@ -768,15 +769,6 @@ def check_str_or_none(point): else: return indexer - @doc(Index.get_slice_bound) - def get_slice_bound( - self, label, side: Literal["left", "right"], kind=lib.no_default - ) -> int: - # GH#42855 handle date here instead of _maybe_cast_slice_bound - if isinstance(label, date) and not isinstance(label, datetime): - label = Timestamp(label).to_pydatetime() - return super().get_slice_bound(label, side=side, kind=kind) - # -------------------------------------------------------------------- @property diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 592e6e9fb703d..e3ab5e8624585 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -25,7 +25,10 @@ DtypeObj, npt, ) -from pandas.util._decorators import doc +from pandas.util._decorators import ( + cache_readonly, + doc, +) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( @@ -159,6 +162,12 @@ class PeriodIndex(DatetimeIndexOpsMixin): _engine_type = libindex.PeriodEngine _supports_partial_string_indexing = True + @cache_readonly + # Signature of "_resolution_obj" incompatible with supertype "DatetimeIndexOpsMixin" + def _resolution_obj(self) -> Resolution: # type: ignore[override] + # for compat with DatetimeIndex + return self.dtype._resolution_obj + # -------------------------------------------------------------------- # methods that dispatch to array and wrap result in Index # These are defined here instead of via inherit_names for mypy @@ -446,10 +455,10 @@ def get_loc(self, key, method=None, tolerance=None): # TODO: pass if method is not None, 
like DTI does? raise KeyError(key) from err - if reso == self.dtype._resolution_obj: - # the reso < self.dtype._resolution_obj case goes + if reso == self._resolution_obj: + # the reso < self._resolution_obj case goes # through _get_string_slice - key = Period(parsed, freq=self.freq) + key = self._cast_partial_indexing_scalar(key) loc = self.get_loc(key, method=method, tolerance=tolerance) # Recursing instead of falling through matters for the exception # message in test_get_loc3 (though not clear if that really matters) @@ -457,28 +466,14 @@ def get_loc(self, key, method=None, tolerance=None): elif method is None: raise KeyError(key) else: - key = Period(parsed, freq=self.freq) + key = self._cast_partial_indexing_scalar(parsed) elif isinstance(key, Period): - sfreq = self.freq - kfreq = key.freq - if not ( - sfreq.n == kfreq.n - # error: "BaseOffset" has no attribute "_period_dtype_code" - and sfreq._period_dtype_code # type: ignore[attr-defined] - # error: "BaseOffset" has no attribute "_period_dtype_code" - == kfreq._period_dtype_code # type: ignore[attr-defined] - ): - # GH#42247 For the subset of DateOffsets that can be Period freqs, - # checking these two attributes is sufficient to check equality, - # and much more performant than `self.freq == key.freq` - raise KeyError(key) + key = self._maybe_cast_for_get_loc(key) + elif isinstance(key, datetime): - try: - key = Period(key, freq=self.freq) - except ValueError as err: - # we cannot construct the Period - raise KeyError(orig_key) from err + key = self._cast_partial_indexing_scalar(key) + else: # in particular integer, which Period constructor would cast to string raise KeyError(key) @@ -488,10 +483,35 @@ def get_loc(self, key, method=None, tolerance=None): except KeyError as err: raise KeyError(orig_key) from err + def _maybe_cast_for_get_loc(self, key: Period) -> Period: + # name is a misnomer, chosen for compat with DatetimeIndex + sfreq = self.freq + kfreq = key.freq + if not ( + sfreq.n == kfreq.n + # 
error: "BaseOffset" has no attribute "_period_dtype_code" + and sfreq._period_dtype_code # type: ignore[attr-defined] + # error: "BaseOffset" has no attribute "_period_dtype_code" + == kfreq._period_dtype_code # type: ignore[attr-defined] + ): + # GH#42247 For the subset of DateOffsets that can be Period freqs, + # checking these two attributes is sufficient to check equality, + # and much more performant than `self.freq == key.freq` + raise KeyError(key) + return key + + def _cast_partial_indexing_scalar(self, label): + try: + key = Period(label, freq=self.freq) + except ValueError as err: + # we cannot construct the Period + raise KeyError(label) from err + return key + @doc(DatetimeIndexOpsMixin._maybe_cast_slice_bound) def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): if isinstance(label, datetime): - label = Period(label, freq=self.freq) + label = self._cast_partial_indexing_scalar(label) return super()._maybe_cast_slice_bound(label, side, kind=kind) @@ -499,11 +519,6 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): iv = Period(parsed, freq=reso.attr_abbrev) return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end")) - def _can_partial_date_slice(self, reso: Resolution) -> bool: - assert isinstance(reso, Resolution), (type(reso), reso) - # e.g. test_getitem_setitem_periodindex - return reso > self.dtype._resolution_obj - def period_range( start=None, end=None, periods: int | None = None, freq=None, name=None
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47038
2022-05-17T03:13:56Z
2022-05-17T12:32:02Z
2022-05-17T12:32:02Z
2022-05-17T14:37:52Z
TYP: pandas/_testing
diff --git a/pandas/_testing/_warnings.py b/pandas/_testing/_warnings.py index 9e89e09e418b3..1a8fe71ae3728 100644 --- a/pandas/_testing/_warnings.py +++ b/pandas/_testing/_warnings.py @@ -7,6 +7,7 @@ import re import sys from typing import ( + Literal, Sequence, Type, cast, @@ -17,7 +18,9 @@ @contextmanager def assert_produces_warning( expected_warning: type[Warning] | bool | None = Warning, - filter_level="always", + filter_level: Literal[ + "error", "ignore", "always", "default", "module", "once" + ] = "always", check_stacklevel: bool = True, raise_on_extra_warnings: bool = True, match: str | None = None, diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index ab42fcd92a3d9..b5e288690decb 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -1,6 +1,9 @@ from __future__ import annotations -from typing import cast +from typing import ( + Literal, + cast, +) import warnings import numpy as np @@ -10,6 +13,7 @@ no_default, ) from pandas._libs.missing import is_matching_na +from pandas._libs.sparse import SparseIndex import pandas._libs.testing as _testing from pandas.util._exceptions import find_stack_level @@ -61,7 +65,7 @@ def assert_almost_equal( left, right, - check_dtype: bool | str = "equiv", + check_dtype: bool | Literal["equiv"] = "equiv", check_less_precise: bool | int | NoDefault = no_default, rtol: float = 1.0e-5, atol: float = 1.0e-8, @@ -169,9 +173,8 @@ def assert_almost_equal( assert_class_equal(left, right, obj=obj) # if we have "equiv", this becomes True - check_dtype = bool(check_dtype) _testing.assert_almost_equal( - left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs + left, right, check_dtype=bool(check_dtype), rtol=rtol, atol=atol, **kwargs ) @@ -686,7 +689,7 @@ def assert_numpy_array_equal( left, right, strict_nan=False, - check_dtype=True, + check_dtype: bool | Literal["equiv"] = True, err_msg=None, check_same=None, obj="numpy array", @@ -765,7 +768,7 @@ def _raise(left, right, 
err_msg): def assert_extension_array_equal( left, right, - check_dtype=True, + check_dtype: bool | Literal["equiv"] = True, index_values=None, check_less_precise=no_default, check_exact=False, @@ -858,7 +861,7 @@ def assert_extension_array_equal( _testing.assert_almost_equal( left_valid, right_valid, - check_dtype=check_dtype, + check_dtype=bool(check_dtype), rtol=rtol, atol=atol, obj="ExtensionArray", @@ -870,7 +873,7 @@ def assert_extension_array_equal( def assert_series_equal( left, right, - check_dtype=True, + check_dtype: bool | Literal["equiv"] = True, check_index_type="equiv", check_series_type=True, check_less_precise=no_default, @@ -1064,7 +1067,7 @@ def assert_series_equal( right._values, rtol=rtol, atol=atol, - check_dtype=check_dtype, + check_dtype=bool(check_dtype), obj=str(obj), index_values=np.asarray(left.index), ) @@ -1100,7 +1103,7 @@ def assert_series_equal( right._values, rtol=rtol, atol=atol, - check_dtype=check_dtype, + check_dtype=bool(check_dtype), obj=str(obj), index_values=np.asarray(left.index), ) @@ -1125,7 +1128,7 @@ def assert_series_equal( def assert_frame_equal( left, right, - check_dtype=True, + check_dtype: bool | Literal["equiv"] = True, check_index_type="equiv", check_column_type="equiv", check_frame_type=True, @@ -1403,8 +1406,8 @@ def assert_sp_array_equal(left, right): assert_numpy_array_equal(left.sp_values, right.sp_values) # SparseIndex comparison - assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex) - assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex) + assert isinstance(left.sp_index, SparseIndex) + assert isinstance(right.sp_index, SparseIndex) left_index = left.sp_index right_index = right.sp_index diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index d7eba6b8319fb..bbcf984e68b4b 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -35,6 +35,7 @@ def test_foo(): from pandas._config import get_option +from pandas._typing import F from 
pandas.compat import ( IS64, is_platform_windows, @@ -216,7 +217,7 @@ def skip_if_np_lt(ver_str: str, *args, reason: str | None = None): ) -def parametrize_fixture_doc(*args): +def parametrize_fixture_doc(*args) -> Callable[[F], F]: """ Intended for use as a decorator for parametrized fixture, this function will wrap the decorated function with a pytest
Toward's pyrights reportGeneralTypeIssues. After this almost all exceptions for reportGeneralTypeIssues are in pandas/{core, io, plotting}.
https://api.github.com/repos/pandas-dev/pandas/pulls/47037
2022-05-17T01:36:09Z
2022-06-05T22:49:13Z
2022-06-05T22:49:13Z
2022-06-08T19:26:35Z
REF: write indexing checks in terms of should_fallback_to_positional
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 8ebaaa28e13a5..c4a3afbb282cf 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4205,9 +4205,14 @@ def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) - is_positional = is_index_slice and not ( - self.is_integer() or self.is_categorical() + + # special case for interval_dtype bc we do not do partial-indexing + # on integer Intervals when slicing + # TODO: write this in terms of e.g. should_partial_index? + ints_are_positional = self._should_fallback_to_positional or is_interval_dtype( + self.dtype ) + is_positional = is_index_slice and ints_are_positional if kind == "getitem": """ diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 174e0a7f81850..c1cb5ad315298 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -222,6 +222,8 @@ def _convert_slice_indexer(self, key: slice, kind: str, is_frame: bool = False): if is_float_dtype(self.dtype): assert kind in ["loc", "getitem"] + # TODO: can we write this as a condition based on + # e.g. _should_fallback_to_positional? 
# We always treat __getitem__ slicing as label-based # translate to locations return self.slice_indexer(key.start, key.stop, key.step) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index ddae58fd46bb0..02c095202d079 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1322,10 +1322,6 @@ def _convert_to_indexer(self, key, axis: int): if isinstance(key, slice): return labels._convert_slice_indexer(key, kind="loc") - # see if we are positional in nature - is_int_index = labels.is_integer() - is_int_positional = is_integer(key) and not is_int_index - if ( isinstance(key, tuple) and not isinstance(labels, MultiIndex) @@ -1350,17 +1346,9 @@ def _convert_to_indexer(self, key, axis: int): if not isinstance(labels, MultiIndex): raise except ValueError: - if not is_int_positional: + if not is_integer(key): raise - - # a positional - if is_int_positional: - - # if we are setting and its not a valid location - # its an insert which fails by definition - - # always valid - return {"key": key} + return {"key": key} if is_nested_tuple(key, labels): if self.ndim == 1 and any(isinstance(k, tuple) for k in key):
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47036
2022-05-16T20:05:15Z
2022-05-17T12:32:44Z
2022-05-17T12:32:44Z
2022-05-17T14:37:27Z
Stacklevel argument updated #46687
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 589ea6e67d926..5d2331298fc98 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10973,7 +10973,6 @@ def _add_numeric_operations(cls): @deprecate_nonkeyword_arguments( version=None, allowed_args=["self"], - stacklevel=find_stack_level() - 1, name="DataFrame.any and Series.any", ) @doc( diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index fbea7a71202eb..b6261b05f6cd7 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -318,9 +318,7 @@ def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: decompression_options=_shared_docs["decompression_options"] % "path_or_buf", ) @deprecate_kwarg(old_arg_name="numpy", new_arg_name=None) -@deprecate_nonkeyword_arguments( - version="2.0", allowed_args=["path_or_buf"], stacklevel=3 -) +@deprecate_nonkeyword_arguments(version="2.0", allowed_args=["path_or_buf"]) def read_json( path_or_buf=None, orient=None, diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index e811ace78f1f5..763bae946a1b4 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -1166,9 +1166,7 @@ def read_table( ... 
-@deprecate_nonkeyword_arguments( - version=None, allowed_args=["filepath_or_buffer"], stacklevel=3 -) +@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"]) @Appender( _doc_read_csv_and_table.format( func_name="read_table", @@ -1265,9 +1263,7 @@ def read_table( return _read(filepath_or_buffer, kwds) -@deprecate_nonkeyword_arguments( - version=None, allowed_args=["filepath_or_buffer"], stacklevel=2 -) +@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"]) def read_fwf( filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], colspecs: Sequence[tuple[int, int]] | str | None = "infer", diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index 052e674d1a488..57014ef9c9622 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -78,9 +78,7 @@ def read_sas( ... -@deprecate_nonkeyword_arguments( - version=None, allowed_args=["filepath_or_buffer"], stacklevel=2 -) +@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"]) @doc(decompression_options=_shared_docs["decompression_options"]) def read_sas( filepath_or_buffer: FilePath | ReadBuffer[bytes], diff --git a/pandas/io/xml.py b/pandas/io/xml.py index 78fbeaad09300..57bb72a951853 100644 --- a/pandas/io/xml.py +++ b/pandas/io/xml.py @@ -974,9 +974,7 @@ def _parse( ) -@deprecate_nonkeyword_arguments( - version=None, allowed_args=["path_or_buffer"], stacklevel=2 -) +@deprecate_nonkeyword_arguments(version=None, allowed_args=["path_or_buffer"]) @doc( storage_options=_shared_docs["storage_options"], decompression_options=_shared_docs["decompression_options"] % "path_or_buffer", diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index 7f60fd60c91b0..a0da3a7eaadce 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -920,5 +920,4 @@ def 
test_read_table_posargs_deprecation(all_parsers): "In a future version of pandas all arguments of read_table " "except for the argument 'filepath_or_buffer' will be keyword-only" ) - with tm.assert_produces_warning(FutureWarning, match=msg): - parser.read_table(data, " ") + parser.read_table_check_warnings(FutureWarning, msg, data, " ") diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py index 066f448d97505..0462d1fe6da0b 100644 --- a/pandas/tests/io/parser/conftest.py +++ b/pandas/tests/io/parser/conftest.py @@ -42,6 +42,16 @@ def read_table(self, *args, **kwargs): kwargs = self.update_kwargs(kwargs) return read_table(*args, **kwargs) + def read_table_check_warnings( + self, warn_type: type[Warning], warn_msg: str, *args, **kwargs + ): + # We need to check the stacklevel here instead of in the tests + # since this is where read_table is called and where the warning + # should point to. + kwargs = self.update_kwargs(kwargs) + with tm.assert_produces_warning(warn_type, match=warn_msg): + return read_table(*args, **kwargs) + class CParser(BaseParser): engine = "c" diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 0f15511e491cc..978f2982e6d18 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -13,6 +13,7 @@ from pandas._libs.properties import cache_readonly # noqa:F401 from pandas._typing import F +from pandas.util._exceptions import find_stack_level def deprecate( @@ -260,7 +261,6 @@ def future_version_msg(version: str | None) -> str: def deprecate_nonkeyword_arguments( version: str | None, allowed_args: list[str] | None = None, - stacklevel: int = 2, name: str | None = None, ) -> Callable[[F], F]: """ @@ -280,9 +280,6 @@ def deprecate_nonkeyword_arguments( defaults to list of all arguments not having the default value. - stacklevel : int, default=2 - The stack level for warnings.warn - name : str, optional The specific name of the function to show in the warning message. 
If None, then the Qualified name of the function @@ -312,7 +309,7 @@ def wrapper(*args, **kwargs): warnings.warn( msg.format(arguments=arguments), FutureWarning, - stacklevel=stacklevel, + stacklevel=find_stack_level(), ) return func(*args, **kwargs)
- [x] closes #46687 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47035
2022-05-16T19:58:59Z
2022-06-30T18:06:56Z
2022-06-30T18:06:56Z
2022-06-30T18:07:06Z
ENH: Incorproate ArrowDtype into ArrowExtensionArray
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py index 53e003e2ed7dd..fbf1cea670c5c 100644 --- a/pandas/_testing/__init__.py +++ b/pandas/_testing/__init__.py @@ -26,6 +26,7 @@ ) from pandas._typing import Dtype +from pandas.compat import pa_version_under1p01 from pandas.core.dtypes.common import ( is_float_dtype, @@ -193,6 +194,45 @@ ] ] +if not pa_version_under1p01: + import pyarrow as pa + + UNSIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()] + SIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.int16(), pa.int32(), pa.uint64()] + ALL_INT_PYARROW_DTYPES = UNSIGNED_INT_PYARROW_DTYPES + SIGNED_INT_PYARROW_DTYPES + + FLOAT_PYARROW_DTYPES = [pa.float32(), pa.float64()] + STRING_PYARROW_DTYPES = [pa.string(), pa.utf8()] + + TIME_PYARROW_DTYPES = [ + pa.time32("s"), + pa.time32("ms"), + pa.time64("us"), + pa.time64("ns"), + ] + DATE_PYARROW_DTYPES = [pa.date32(), pa.date64()] + DATETIME_PYARROW_DTYPES = [ + pa.timestamp(unit=unit, tz=tz) + for unit in ["s", "ms", "us", "ns"] + for tz in [None, "UTC", "US/Pacific", "US/Eastern"] + ] + TIMEDELTA_PYARROW_DTYPES = [pa.duration(unit) for unit in ["s", "ms", "us", "ns"]] + + BOOL_PYARROW_DTYPES = [pa.bool_()] + + # TODO: Add container like pyarrow types: + # https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions + ALL_PYARROW_DTYPES = ( + ALL_INT_PYARROW_DTYPES + + FLOAT_PYARROW_DTYPES + + TIME_PYARROW_DTYPES + + DATE_PYARROW_DTYPES + + DATETIME_PYARROW_DTYPES + + TIMEDELTA_PYARROW_DTYPES + + BOOL_PYARROW_DTYPES + ) + + EMPTY_STRING_PATTERN = re.compile("^$") # set testing_mode diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index fdd505e259dd9..66bb12db277fc 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -9,6 +9,8 @@ import numpy as np from pandas._typing import ( + Dtype, + PositionalIndexer, TakeIndexer, npt, ) @@ -24,6 +26,7 @@ is_array_like, is_bool_dtype, is_integer, + 
is_integer_dtype, is_scalar, ) from pandas.core.dtypes.missing import isna @@ -31,6 +34,7 @@ from pandas.core.arrays.base import ExtensionArray from pandas.core.indexers import ( check_array_indexer, + unpack_tuple_and_ellipses, validate_indices, ) @@ -39,6 +43,7 @@ import pyarrow.compute as pc from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning + from pandas.core.arrays.arrow.dtype import ArrowDtype if TYPE_CHECKING: from pandas import Series @@ -48,16 +53,130 @@ class ArrowExtensionArray(ExtensionArray): """ - Base class for ExtensionArray backed by Arrow array. + Base class for ExtensionArray backed by Arrow ChunkedArray. """ _data: pa.ChunkedArray - def __init__(self, values: pa.ChunkedArray) -> None: - self._data = values + def __init__(self, values: pa.Array | pa.ChunkedArray) -> None: + if pa_version_under1p01: + msg = "pyarrow>=1.0.0 is required for PyArrow backed ArrowExtensionArray." + raise ImportError(msg) + if isinstance(values, pa.Array): + self._data = pa.chunked_array([values]) + elif isinstance(values, pa.ChunkedArray): + self._data = values + else: + raise ValueError( + f"Unsupported type '{type(values)}' for ArrowExtensionArray" + ) + self._dtype = ArrowDtype(self._data.type) + + @classmethod + def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False): + """ + Construct a new ExtensionArray from a sequence of scalars. + """ + if isinstance(dtype, ArrowDtype): + pa_dtype = dtype.pyarrow_dtype + elif dtype: + pa_dtype = pa.from_numpy_dtype(dtype) + else: + pa_dtype = None + + if isinstance(scalars, cls): + data = scalars._data + if pa_dtype: + data = data.cast(pa_dtype) + return cls(data) + else: + return cls( + pa.chunked_array(pa.array(scalars, type=pa_dtype, from_pandas=True)) + ) + + @classmethod + def _from_sequence_of_strings( + cls, strings, *, dtype: Dtype | None = None, copy=False + ): + """ + Construct a new ExtensionArray from a sequence of strings. 
+ """ + return cls._from_sequence(strings, dtype=dtype, copy=copy) + + def __getitem__(self, item: PositionalIndexer): + """Select a subset of self. + + Parameters + ---------- + item : int, slice, or ndarray + * int: The position in 'self' to get. + * slice: A slice object, where 'start', 'stop', and 'step' are + integers or None + * ndarray: A 1-d boolean NumPy ndarray the same length as 'self' + + Returns + ------- + item : scalar or ExtensionArray + + Notes + ----- + For scalar ``item``, return a scalar value suitable for the array's + type. This should be an instance of ``self.dtype.type``. + For slice ``key``, return an instance of ``ExtensionArray``, even + if the slice is length 0 or 1. + For a boolean mask, return an instance of ``ExtensionArray``, filtered + to the values where ``item`` is True. + """ + item = check_array_indexer(self, item) + + if isinstance(item, np.ndarray): + if not len(item): + # Removable once we migrate StringDtype[pyarrow] to ArrowDtype[string] + if self._dtype.name == "string" and self._dtype.storage == "pyarrow": + pa_dtype = pa.string() + else: + pa_dtype = self._dtype.pyarrow_dtype + return type(self)(pa.chunked_array([], type=pa_dtype)) + elif is_integer_dtype(item.dtype): + return self.take(item) + elif is_bool_dtype(item.dtype): + return type(self)(self._data.filter(item)) + else: + raise IndexError( + "Only integers, slices and integer or " + "boolean arrays are valid indices." + ) + elif isinstance(item, tuple): + item = unpack_tuple_and_ellipses(item) + + # error: Non-overlapping identity check (left operand type: + # "Union[Union[int, integer[Any]], Union[slice, List[int], + # ndarray[Any, Any]]]", right operand type: "ellipsis") + if item is Ellipsis: # type: ignore[comparison-overlap] + # TODO: should be handled by pyarrow? + item = slice(None) + + if is_scalar(item) and not is_integer(item): + # e.g. 
"foo" or 2.5 + # exception message copied from numpy + raise IndexError( + r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis " + r"(`None`) and integer or boolean arrays are valid indices" + ) + # We are not an array indexer, so maybe e.g. a slice or integer + # indexer. We dispatch to pyarrow. + value = self._data[item] + if isinstance(value, pa.ChunkedArray): + return type(self)(value) + else: + scalar = value.as_py() + if scalar is None: + return self._dtype.na_value + else: + return scalar def __arrow_array__(self, type=None): - """Convert myself to a pyarrow Array or ChunkedArray.""" + """Convert myself to a pyarrow ChunkedArray.""" return self._data def equals(self, other) -> bool: @@ -67,6 +186,13 @@ def equals(self, other) -> bool: # TODO: is this documented somewhere? return self._data == other._data + @property + def dtype(self) -> ArrowDtype: + """ + An instance of 'ExtensionDtype'. + """ + return self._dtype + @property def nbytes(self) -> int: """ @@ -377,7 +503,8 @@ def _indexing_key_to_indices( def _maybe_convert_setitem_value(self, value): """Maybe convert value to be pyarrow compatible.""" - raise NotImplementedError() + # TODO: Make more robust like ArrowStringArray._maybe_convert_setitem_value + return value def _set_via_chunk_iteration( self, indices: npt.NDArray[np.intp], value: npt.NDArray[Any] diff --git a/pandas/core/arrays/arrow/dtype.py b/pandas/core/arrays/arrow/dtype.py index c0ecb0856f27f..6c932f3b94e53 100644 --- a/pandas/core/arrays/arrow/dtype.py +++ b/pandas/core/arrays/arrow/dtype.py @@ -1,35 +1,60 @@ from __future__ import annotations +import re + import numpy as np import pyarrow as pa +from pandas._libs import missing as libmissing from pandas._typing import DtypeObj from pandas.util._decorators import cache_readonly -from pandas.core.dtypes.base import StorageExtensionDtype - -from pandas.core.arrays.arrow import ArrowExtensionArray +from pandas.core.dtypes.base import ( + StorageExtensionDtype, + 
register_extension_dtype, +) +@register_extension_dtype class ArrowDtype(StorageExtensionDtype): """ - Base class for dtypes for BaseArrowArray subclasses. + Base class for dtypes for ArrowExtensionArray. Modeled after BaseMaskedDtype """ - name: str - base = None - type: pa.DataType + na_value = libmissing.NA + _metadata = ("storage", "pyarrow_dtype") # type: ignore[assignment] - na_value = pa.NA + def __init__(self, pyarrow_dtype: pa.DataType) -> None: + super().__init__("pyarrow") + if not isinstance(pyarrow_dtype, pa.DataType): + raise ValueError( + f"pyarrow_dtype ({pyarrow_dtype}) must be an instance " + f"of a pyarrow.DataType. Got {type(pyarrow_dtype)} instead." + ) + self.pyarrow_dtype = pyarrow_dtype - def __init__(self, storage="pyarrow") -> None: - super().__init__(storage) + @property + def type(self): + """ + Returns pyarrow.DataType. + """ + return type(self.pyarrow_dtype) + + @property + def name(self) -> str: # type: ignore[override] + """ + A string identifying the data type. + """ + return str(self.pyarrow_dtype) @cache_readonly def numpy_dtype(self) -> np.dtype: """Return an instance of the related numpy dtype""" - return self.type.to_pandas_dtype() + try: + return np.dtype(self.pyarrow_dtype.to_pandas_dtype()) + except (NotImplementedError, TypeError): + return np.dtype(object) @cache_readonly def kind(self) -> str: @@ -49,6 +74,8 @@ def construct_array_type(cls): ------- type """ + from pandas.core.arrays.arrow import ArrowExtensionArray + return ArrowExtensionArray @classmethod @@ -59,29 +86,52 @@ def construct_from_string(cls, string: str): Parameters ---------- string : str + string should follow the format f"{pyarrow_type}[pyarrow]" + e.g. 
int64[pyarrow] """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) - if string == f"{cls.name}[pyarrow]": - return cls(storage="pyarrow") - raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") + if not string.endswith("[pyarrow]"): + raise TypeError(f"string {string} must end with '[pyarrow]'") + base_type = string.split("[pyarrow]")[0] + pa_dtype = getattr(pa, base_type, None) + if pa_dtype is None: + has_parameters = re.search(r"\[.*\]", base_type) + if has_parameters: + raise NotImplementedError( + "Passing pyarrow type specific parameters " + f"({has_parameters.group()}) in the string is not supported. " + "Please construct an ArrowDtype object with a pyarrow_dtype " + "instance with specific parameters." + ) + raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") + return cls(pa_dtype()) + + @property + def _is_numeric(self) -> bool: + """ + Whether columns with this dtype should be considered numeric. + """ + # TODO: pa.types.is_boolean? + return ( + pa.types.is_integer(self.pyarrow_dtype) + or pa.types.is_floating(self.pyarrow_dtype) + or pa.types.is_decimal(self.pyarrow_dtype) + ) - @classmethod - def from_numpy_dtype(cls, dtype: np.dtype) -> ArrowDtype: + @property + def _is_boolean(self) -> bool: """ - Construct the ArrowDtype corresponding to the given numpy dtype. + Whether this dtype should be considered boolean. """ - # TODO: This may be incomplete - pa_dtype = pa.from_numpy_dtype(dtype) - if pa_dtype is cls.type: - return cls() - raise NotImplementedError(dtype) + return pa.types.is_boolean(self.pyarrow_dtype) def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: # We unwrap any masked dtypes, find the common dtype we would use # for that, then re-mask the result. 
+ # Mirrors BaseMaskedDtype from pandas.core.dtypes.cast import find_common_type new_dtype = find_common_type( @@ -91,11 +141,11 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: ] ) if not isinstance(new_dtype, np.dtype): - # If we ever support e.g. Masked[DatetimeArray] then this will change return None try: - return type(self).from_numpy_dtype(new_dtype) - except (KeyError, NotImplementedError): + pa_dtype = pa.from_numpy_dtype(new_dtype) + return type(self)(pa_dtype) + except NotImplementedError: return None def __from_arrow__(self, array: pa.Array | pa.ChunkedArray): diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 21b5dc625956e..45683d83a1303 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -64,9 +64,6 @@ class StringDtype(StorageExtensionDtype): StringDtype is considered experimental. The implementation and parts of the API may change without warning. - In particular, StringDtype.na_value may change to no longer be - ``pd.NA``. 
- Parameters ---------- storage : {"python", "pyarrow"}, optional diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 8b6f1ffcfa59b..a07f748fa0c8c 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -2,10 +2,7 @@ from collections.abc import Callable # noqa: PDF001 import re -from typing import ( - Union, - overload, -) +from typing import Union import numpy as np @@ -16,10 +13,7 @@ from pandas._typing import ( Dtype, NpDtype, - PositionalIndexer, Scalar, - ScalarIndexer, - SequenceIndexer, npt, ) from pandas.compat import ( @@ -32,7 +26,6 @@ from pandas.core.dtypes.common import ( is_bool_dtype, is_dtype_equal, - is_integer, is_integer_dtype, is_object_dtype, is_scalar, @@ -50,10 +43,6 @@ BaseStringArray, StringDtype, ) -from pandas.core.indexers import ( - check_array_indexer, - unpack_tuple_and_ellipses, -) from pandas.core.strings.object_array import ObjectStringArrayMixin if not pa_version_under1p01: @@ -76,7 +65,7 @@ def _chk_pyarrow_available() -> None: if pa_version_under1p01: - msg = "pyarrow>=1.0.0 is required for PyArrow backed StringArray." + msg = "pyarrow>=1.0.0 is required for PyArrow backed ArrowExtensionArray." raise ImportError(msg) @@ -132,13 +121,9 @@ class ArrowStringArray( """ def __init__(self, values) -> None: + super().__init__(values) + # TODO: Migrate to ArrowDtype instead self._dtype = StringDtype(storage="pyarrow") - if isinstance(values, pa.Array): - self._data = pa.chunked_array([values]) - elif isinstance(values, pa.ChunkedArray): - self._data = values - else: - raise ValueError(f"Unsupported type '{type(values)}' for ArrowStringArray") if not pa.types.is_string(self._data.type): raise ValueError( @@ -174,7 +159,7 @@ def _from_sequence_of_strings( return cls._from_sequence(strings, dtype=dtype, copy=copy) @property - def dtype(self) -> StringDtype: + def dtype(self) -> StringDtype: # type: ignore[override] """ An instance of 'string[pyarrow]'. 
""" @@ -205,86 +190,6 @@ def to_numpy( result[mask] = na_value return result - @overload - def __getitem__(self, item: ScalarIndexer) -> ArrowStringScalarOrNAT: - ... - - @overload - def __getitem__(self: ArrowStringArray, item: SequenceIndexer) -> ArrowStringArray: - ... - - def __getitem__( - self: ArrowStringArray, item: PositionalIndexer - ) -> ArrowStringArray | ArrowStringScalarOrNAT: - """Select a subset of self. - - Parameters - ---------- - item : int, slice, or ndarray - * int: The position in 'self' to get. - * slice: A slice object, where 'start', 'stop', and 'step' are - integers or None - * ndarray: A 1-d boolean NumPy ndarray the same length as 'self' - - Returns - ------- - item : scalar or ExtensionArray - - Notes - ----- - For scalar ``item``, return a scalar value suitable for the array's - type. This should be an instance of ``self.dtype.type``. - For slice ``key``, return an instance of ``ExtensionArray``, even - if the slice is length 0 or 1. - For a boolean mask, return an instance of ``ExtensionArray``, filtered - to the values where ``item`` is True. - """ - item = check_array_indexer(self, item) - - if isinstance(item, np.ndarray): - if not len(item): - return type(self)(pa.chunked_array([], type=pa.string())) - elif is_integer_dtype(item.dtype): - return self.take(item) - elif is_bool_dtype(item.dtype): - return type(self)(self._data.filter(item)) - else: - raise IndexError( - "Only integers, slices and integer or " - "boolean arrays are valid indices." - ) - elif isinstance(item, tuple): - item = unpack_tuple_and_ellipses(item) - - # error: Non-overlapping identity check (left operand type: - # "Union[Union[int, integer[Any]], Union[slice, List[int], - # ndarray[Any, Any]]]", right operand type: "ellipsis") - if item is Ellipsis: # type: ignore[comparison-overlap] - # TODO: should be handled by pyarrow? - item = slice(None) - - if is_scalar(item) and not is_integer(item): - # e.g. 
"foo" or 2.5 - # exception message copied from numpy - raise IndexError( - r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis " - r"(`None`) and integer or boolean arrays are valid indices" - ) - # We are not an array indexer, so maybe e.g. a slice or integer - # indexer. We dispatch to pyarrow. - value = self._data[item] - if isinstance(value, pa.ChunkedArray): - return type(self)(value) - else: - return self._as_pandas_scalar(value) - - def _as_pandas_scalar(self, arrow_scalar: pa.Scalar): - scalar = arrow_scalar.as_py() - if scalar is None: - return self._dtype.na_value - else: - return scalar - def _cmp_method(self, other, op): from pandas.arrays import BooleanArray diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index 9762b779477e4..cffac15ef6496 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -408,7 +408,7 @@ def __str__(self): return self.name def __eq__(self, other: Any) -> bool: - if isinstance(other, self.type) and other == self.name: + if isinstance(other, str) and other == self.name: return True return super().__eq__(other) diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index e9d48eb937b36..b563f84207b22 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -262,7 +262,7 @@ def test_constructor_raises(cls): if cls is pd.arrays.StringArray: msg = "StringArray requires a sequence of strings or pandas.NA" else: - msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowStringArray" + msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowExtensionArray" with pytest.raises(ValueError, match=msg): cls(np.array(["a", "b"], dtype="S1")) diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py index de1b7a9c603af..f43cf298857a0 100644 --- a/pandas/tests/arrays/string_/test_string_arrow.py +++ 
b/pandas/tests/arrays/string_/test_string_arrow.py @@ -59,7 +59,7 @@ def test_constructor_not_string_type_raises(array, chunked): pytest.skip("chunked not applicable to numpy array") arr = pa.chunked_array(arr) if array is np: - msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowStringArray" + msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowExtensionArray" else: msg = re.escape( "ArrowStringArray requires a PyArrow (chunked) array of string type" @@ -122,7 +122,7 @@ def test_from_sequence_wrong_dtype_raises(): reason="pyarrow is installed", ) def test_pyarrow_not_installed_raises(): - msg = re.escape("pyarrow>=1.0.0 is required for PyArrow backed StringArray") + msg = re.escape("pyarrow>=1.0.0 is required for PyArrow backed") with pytest.raises(ImportError, match=msg): StringDtype(storage="pyarrow") diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py index d19a6245809be..22595c4e461d7 100644 --- a/pandas/tests/extension/arrow/arrays.py +++ b/pandas/tests/extension/arrow/arrays.py @@ -185,7 +185,7 @@ def __init__(self, values) -> None: assert values.type == pa.bool_() self._data = values - self._dtype = ArrowBoolDtype() + self._dtype = ArrowBoolDtype() # type: ignore[assignment] class ArrowStringArray(ArrowExtensionArray): @@ -195,4 +195,4 @@ def __init__(self, values) -> None: assert values.type == pa.string() self._data = values - self._dtype = ArrowStringDtype() + self._dtype = ArrowStringDtype() # type: ignore[assignment] diff --git a/pandas/tests/extension/arrow/test_timestamp.py b/pandas/tests/extension/arrow/test_timestamp.py index b2750784ab3d6..5b81940e5a6c0 100644 --- a/pandas/tests/extension/arrow/test_timestamp.py +++ b/pandas/tests/extension/arrow/test_timestamp.py @@ -46,7 +46,7 @@ def __init__(self, values) -> None: assert values.type == pa.timestamp("us") self._data = values - self._dtype = ArrowTimestampUSDtype() + self._dtype = ArrowTimestampUSDtype() # type: ignore[assignment] def 
test_constructor_extensionblock(): diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py new file mode 100644 index 0000000000000..4047c0db1fee4 --- /dev/null +++ b/pandas/tests/extension/test_arrow.py @@ -0,0 +1,184 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. +""" + +from datetime import ( + date, + datetime, + time, + timedelta, +) + +import pytest + +from pandas.compat import ( + pa_version_under2p0, + pa_version_under3p0, +) + +import pandas as pd +import pandas._testing as tm +from pandas.tests.extension import base + +pa = pytest.importorskip("pyarrow", minversion="1.0.1") + +from pandas.core.arrays.arrow.dtype import ArrowDtype # isort:skip + + +@pytest.fixture(params=tm.ALL_PYARROW_DTYPES) +def dtype(request): + return ArrowDtype(pyarrow_dtype=request.param) + + +@pytest.fixture +def data(dtype): + pa_dtype = dtype.pyarrow_dtype + if pa.types.is_boolean(pa_dtype): + data = [True, False] * 4 + [None] + [True, False] * 44 + [None] + [True, False] + elif pa.types.is_floating(pa_dtype): + data = [1.0, 0.0] * 4 + [None] + [-2.0, -1.0] * 44 + [None] + [0.5, 99.5] + elif pa.types.is_signed_integer(pa_dtype): + data = [1, 0] * 4 + [None] + [-2, -1] * 44 + [None] + [1, 99] + elif pa.types.is_unsigned_integer(pa_dtype): + data = [1, 0] * 4 + [None] + [2, 1] * 44 + [None] + [1, 99] + elif pa.types.is_date(pa_dtype): + data = ( + [date(2022, 1, 1), 
date(1999, 12, 31)] * 4 + + [None] + + [date(2022, 1, 1), date(2022, 1, 1)] * 44 + + [None] + + [date(1999, 12, 31), date(1999, 12, 31)] + ) + elif pa.types.is_timestamp(pa_dtype): + data = ( + [datetime(2020, 1, 1, 1, 1, 1, 1), datetime(1999, 1, 1, 1, 1, 1, 1)] * 4 + + [None] + + [datetime(2020, 1, 1, 1), datetime(1999, 1, 1, 1)] * 44 + + [None] + + [datetime(2020, 1, 1), datetime(1999, 1, 1)] + ) + elif pa.types.is_duration(pa_dtype): + data = ( + [timedelta(1), timedelta(1, 1)] * 4 + + [None] + + [timedelta(-1), timedelta(0)] * 44 + + [None] + + [timedelta(-10), timedelta(10)] + ) + elif pa.types.is_time(pa_dtype): + data = ( + [time(12, 0), time(0, 12)] * 4 + + [None] + + [time(0, 0), time(1, 1)] * 44 + + [None] + + [time(0, 5), time(5, 0)] + ) + else: + raise NotImplementedError + return pd.array(data, dtype=dtype) + + +@pytest.fixture +def data_missing(data): + """Length-2 array with [NA, Valid]""" + return type(data)._from_sequence([None, data[0]]) + + +@pytest.fixture +def na_value(): + """The scalar missing value for this type. Default 'None'""" + return pd.NA + + +class TestConstructors(base.BaseConstructorsTests): + @pytest.mark.xfail( + reason=( + "str(dtype) constructs " + "e.g. 
in64[pyarrow] like int64 (numpy) " + "due to StorageExtensionDtype.__str__" + ) + ) + def test_from_dtype(self, data): + super().test_from_dtype(data) + + +class TestGetitemTests(base.BaseGetitemTests): + @pytest.mark.xfail( + reason=( + "data.dtype.type return pyarrow.DataType " + "but this (intentionally) returns " + "Python scalars or pd.Na" + ) + ) + def test_getitem_scalar(self, data): + super().test_getitem_scalar(data) + + def test_take_series(self, request, data): + tz = getattr(data.dtype.pyarrow_dtype, "tz", None) + unit = getattr(data.dtype.pyarrow_dtype, "unit", None) + bad_units = ["ns"] + if pa_version_under2p0: + bad_units.extend(["s", "ms", "us"]) + if pa_version_under3p0 and tz not in (None, "UTC") and unit in bad_units: + request.node.add_marker( + pytest.mark.xfail( + reason=( + f"Not supported by pyarrow < 3.0 " + f"with timestamp type {tz} and {unit}" + ) + ) + ) + super().test_take_series(data) + + def test_reindex(self, request, data, na_value): + tz = getattr(data.dtype.pyarrow_dtype, "tz", None) + unit = getattr(data.dtype.pyarrow_dtype, "unit", None) + bad_units = ["ns"] + if pa_version_under2p0: + bad_units.extend(["s", "ms", "us"]) + if pa_version_under3p0 and tz not in (None, "UTC") and unit in bad_units: + request.node.add_marker( + pytest.mark.xfail( + reason=( + f"Not supported by pyarrow < 3.0 " + f"with timestamp type {tz} and {unit}" + ) + ) + ) + super().test_reindex(data, na_value) + + def test_loc_iloc_frame_single_dtype(self, request, using_array_manager, data): + tz = getattr(data.dtype.pyarrow_dtype, "tz", None) + unit = getattr(data.dtype.pyarrow_dtype, "unit", None) + bad_units = ["ns"] + if pa_version_under2p0: + bad_units.extend(["s", "ms", "us"]) + if ( + pa_version_under3p0 + and not using_array_manager + and tz not in (None, "UTC") + and unit in bad_units + ): + request.node.add_marker( + pytest.mark.xfail( + reason=( + f"Not supported by pyarrow < 3.0 " + f"with timestamp type {tz} and {unit}" + ) + ) + ) + 
super().test_loc_iloc_frame_single_dtype(data) + + +def test_arrowdtype_construct_from_string_type_with_parameters(): + with pytest.raises(NotImplementedError, match="Passing pyarrow type"): + ArrowDtype.construct_from_string("timestamp[s][pyarrow]")
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). Not fully user facing yet. Supersedes https://github.com/pandas-dev/pandas/pull/46972 cc @jbrockmendel let me know if this is what you had in mind
https://api.github.com/repos/pandas-dev/pandas/pulls/47034
2022-05-16T18:53:40Z
2022-06-09T12:42:59Z
2022-06-09T12:42:59Z
2022-08-25T23:44:11Z
DOC: Fix a typo in documentation for qyear
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 05e71c22052ad..de44f0da8275e 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -2202,7 +2202,7 @@ cdef class _Period(PeriodMixin): 2018 If the fiscal year starts in April (`Q-MAR`), the first quarter of - 2018 will start in April 2017. `year` will then be 2018, but `qyear` + 2018 will start in April 2017. `year` will then be 2017, but `qyear` will be the fiscal year, 2018. >>> per = pd.Period('2018Q1', freq='Q-MAR')
This fixes a typo that I ran across in the [documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Period.qyear.html?highlight=qyear#pandas.Period.qyear). This change corrects the wording to match the given example that is just below.
https://api.github.com/repos/pandas-dev/pandas/pulls/47033
2022-05-16T18:13:19Z
2022-05-16T18:47:38Z
2022-05-16T18:47:38Z
2022-05-16T18:47:48Z
Backport PR #47015 on branch 1.4.x (CI: Ensure no-use-pep517 with no-build-isolation with new pip version)
diff --git a/ci/setup_env.sh b/ci/setup_env.sh index 483353cfcb3cd..80448319f7918 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -104,6 +104,6 @@ echo "Build extensions" python setup.py build_ext -q -j3 echo "Install pandas" -python -m pip install --no-build-isolation -e . +python -m pip install --no-build-isolation --no-use-pep517 -e . echo "done"
Backport PR #47015: CI: Ensure no-use-pep517 with no-build-isolation with new pip version
https://api.github.com/repos/pandas-dev/pandas/pulls/47031
2022-05-15T13:50:48Z
2022-05-15T15:22:52Z
2022-05-15T15:22:52Z
2022-05-15T15:22:52Z
Backport PR #47020 on branch 1.4.x (CI: Move 32 bit Linux build to GHA)
diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml new file mode 100644 index 0000000000000..500e800a082d9 --- /dev/null +++ b/.github/workflows/32-bit-linux.yml @@ -0,0 +1,43 @@ +name: 32 Bit Linux + +on: + push: + branches: + - main + - 1.4.x + pull_request: + branches: + - main + - 1.4.x + paths-ignore: + - "doc/**" + +jobs: + pytest: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Run 32-bit manylinux2014 Docker Build / Tests + run: | + docker pull quay.io/pypa/manylinux2014_i686 + docker run --platform linux/386 -v $(pwd):/pandas quay.io/pypa/manylinux2014_i686 \ + /bin/bash -xc "cd pandas && \ + /opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \ + . ~/virtualenvs/pandas-dev/bin/activate && \ + python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \ + pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \ + python setup.py build_ext -q -j2 && \ + python -m pip install --no-build-isolation --no-use-pep517 -e . 
&& \ + export PANDAS_CI=1 && \ + pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml" + + - name: Publish test results for Python 3.8-32 bit full Linux + uses: actions/upload-artifact@v3 + with: + name: Test results + path: test-data.xml + if: failure() diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml index 8ca4cce155e96..23a48e567dfe9 100644 --- a/.github/workflows/python-dev.yml +++ b/.github/workflows/python-dev.yml @@ -2,7 +2,7 @@ # Unfreeze(by commentingthe if: false() condition) once the # next Python Dev version has released beta 1 and both Cython and numpy support it # After that Python has released, migrate the workflows to the -# posix GHA workflows/Azure pipelines and "freeze" this file by +# posix GHA workflows and "freeze" this file by # uncommenting the if: false() condition # Feel free to modify this comment as necessary. diff --git a/README.md b/README.md index 26aed081de4af..4eb983cfb24e8 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,6 @@ [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3509134.svg)](https://doi.org/10.5281/zenodo.3509134) [![Package Status](https://img.shields.io/pypi/status/pandas.svg)](https://pypi.org/project/pandas/) [![License](https://img.shields.io/pypi/l/pandas.svg)](https://github.com/pandas-dev/pandas/blob/main/LICENSE) -[![Azure Build Status](https://dev.azure.com/pandas-dev/pandas/_apis/build/status/pandas-dev.pandas?branch=main)](https://dev.azure.com/pandas-dev/pandas/_build/latest?definitionId=1&branch=main) [![Coverage](https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=main)](https://codecov.io/gh/pandas-dev/pandas) [![Downloads](https://static.pepy.tech/personalized-badge/pandas?period=month&units=international_system&left_color=black&right_color=orange&left_text=PyPI%20downloads%20per%20month)](https://pepy.tech/project/pandas) 
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/pydata/pandas) diff --git a/azure-pipelines.yml b/azure-pipelines.yml deleted file mode 100644 index 0c6195ff6924b..0000000000000 --- a/azure-pipelines.yml +++ /dev/null @@ -1,50 +0,0 @@ -# Adapted from https://github.com/numba/numba/blob/master/azure-pipelines.yml -trigger: - branches: - include: - - main - - 1.4.x - paths: - exclude: - - 'doc/*' - -pr: - autoCancel: true - branches: - include: - - main - - 1.4.x - -variables: - PYTEST_WORKERS: auto - PYTEST_TARGET: pandas - PATTERN: "not slow and not high_memory and not db and not network and not single_cpu" - PANDAS_CI: 1 - -jobs: -- job: py38_32bit - pool: - vmImage: ubuntu-18.04 - - steps: - # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941 - - script: | - docker pull quay.io/pypa/manylinux2014_i686 - docker run -v $(pwd):/pandas quay.io/pypa/manylinux2014_i686 \ - /bin/bash -xc "cd pandas && \ - /opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \ - . ~/virtualenvs/pandas-dev/bin/activate && \ - python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \ - pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \ - python setup.py build_ext -q -j2 && \ - python -m pip install --no-build-isolation -e . 
&& \ - export PANDAS_CI=1 && \ - pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml" - displayName: 'Run 32-bit manylinux2014 Docker Build / Tests' - - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for Python 3.8-32 bit full Linux' diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst index 4826921d4866b..6a8f07663578e 100644 --- a/doc/source/development/contributing_codebase.rst +++ b/doc/source/development/contributing_codebase.rst @@ -443,13 +443,11 @@ library. This makes type checkers aware of the type annotations shipped with pan Testing with continuous integration ----------------------------------- -The pandas test suite will run automatically on `GitHub Actions <https://github.com/features/actions/>`__ and -`Azure Pipelines <https://azure.microsoft.com/en-us/services/devops/pipelines/>`__ +The pandas test suite will run automatically on `GitHub Actions <https://github.com/features/actions/>`__ continuous integration services, once your pull request is submitted. However, if you wish to run the test suite on a branch prior to submitting the pull request, then the continuous integration services need to be hooked to your GitHub repository. Instructions are here -for `GitHub Actions <https://docs.github.com/en/actions/>`__ and -`Azure Pipelines <https://docs.microsoft.com/en-us/azure/devops/pipelines/?view=azure-devops>`__. +for `GitHub Actions <https://docs.github.com/en/actions/>`__. A pull-request will be considered for merging when you have an all 'green' build. If any tests are failing, then you will get a red 'X', where you can click through to see the individual failed tests. 
diff --git a/pandas/conftest.py b/pandas/conftest.py index 958df72b3f607..148f8bea16b0c 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -649,7 +649,7 @@ def index_with_missing(request): """ # GH 35538. Use deep copy to avoid illusive bug on np-dev - # Azure pipeline that writes into indices_dict despite copy + # GHA pipeline that writes into indices_dict despite copy ind = indices_dict[request.param].copy(deep=True) vals = ind.values if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]: diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index ff31d93947776..522d25205eeb0 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -71,7 +71,7 @@ def s3_base(worker_id): if is_platform_arm() or is_platform_mac() or is_platform_windows(): # NOT RUN on Windows/MacOS/ARM, only Ubuntu # - subprocess in CI can cause timeouts - # - Azure pipelines/Github Actions do not support + # - Github Actions do not support # container services for the above OSs # - CircleCI will probably hit the Docker rate pull limit pytest.skip( diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py index 9fe7ae7a5bb90..cf63ab2fe31c7 100644 --- a/pandas/tests/window/test_numba.py +++ b/pandas/tests/window/test_numba.py @@ -21,7 +21,7 @@ # TODO(GH#44584): Mark these as pytest.mark.single_cpu pytestmark = pytest.mark.skipif( is_ci_environment() and (is_platform_windows() or is_platform_mac()), - reason="On Azure CI, Windows can fail with " + reason="On GHA CI, Windows can fail with " "'Windows fatal exception: stack overflow' " "and MacOS can timeout", ) diff --git a/pandas/tests/window/test_online.py b/pandas/tests/window/test_online.py index ab435a39a497b..b98129e1b07ec 100644 --- a/pandas/tests/window/test_online.py +++ b/pandas/tests/window/test_online.py @@ -17,7 +17,7 @@ # TODO(GH#44584): Mark these as pytest.mark.single_cpu pytestmark = pytest.mark.skipif( is_ci_environment() and (is_platform_windows() or 
is_platform_mac()), - reason="On Azure CI, Windows can fail with " + reason="On GHA CI, Windows can fail with " "'Windows fatal exception: stack overflow' " "and MacOS can timeout", )
Backport PR #47020
https://api.github.com/repos/pandas-dev/pandas/pulls/47029
2022-05-15T11:33:59Z
2022-05-15T13:53:41Z
2022-05-15T13:53:41Z
2022-05-15T13:53:45Z
DOC: Clarify decay argument validation in ewm when times is provided
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 9f1c4755bc54f..4681257dcfca0 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -150,6 +150,7 @@ Other enhancements - Added ``validate`` argument to :meth:`DataFrame.join` (:issue:`46622`) - A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`) - Added ``numeric_only`` argument to :meth:`Resampler.sum`, :meth:`Resampler.prod`, :meth:`Resampler.min`, :meth:`Resampler.max`, :meth:`Resampler.first`, and :meth:`Resampler.last` (:issue:`46442`) +- ``times`` argument in :class:`.ExponentialMovingWindow` now accepts ``np.timedelta64`` (:issue:`47003`) .. --------------------------------------------------------------------------- .. _whatsnew_150.notable_bug_fixes: diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 32cb4938344c4..922d194f04c55 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -134,8 +134,9 @@ class ExponentialMovingWindow(BaseWindow): r""" Provide exponentially weighted (EW) calculations. - Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be - provided. + Exactly one of ``com``, ``span``, ``halflife``, or ``alpha`` must be + provided if ``times`` is not provided. If ``times`` is provided, + ``halflife`` and one of ``com``, ``span`` or ``alpha`` may be provided. Parameters ---------- @@ -155,7 +156,7 @@ class ExponentialMovingWindow(BaseWindow): :math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for :math:`halflife > 0`. - If ``times`` is specified, the time unit (str or timedelta) over which an + If ``times`` is specified, a timedelta convertible unit over which an observation decays to half its value. Only applicable to ``mean()``, and halflife value will not apply to the other functions. 
@@ -389,10 +390,8 @@ def __init__( raise ValueError("times must be datetime64[ns] dtype.") if len(self.times) != len(obj): raise ValueError("times must be the same length as the object.") - if not isinstance(self.halflife, (str, datetime.timedelta)): - raise ValueError( - "halflife must be a string or datetime.timedelta object" - ) + if not isinstance(self.halflife, (str, datetime.timedelta, np.timedelta64)): + raise ValueError("halflife must be a timedelta convertible object") if isna(self.times).any(): raise ValueError("Cannot convert NaT values to integer") self._deltas = _calculate_deltas(self.times, self.halflife) @@ -404,7 +403,7 @@ def __init__( self._com = 1.0 else: if self.halflife is not None and isinstance( - self.halflife, (str, datetime.timedelta) + self.halflife, (str, datetime.timedelta, np.timedelta64) ): raise ValueError( "halflife can only be a timedelta convertible argument if " diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py index f42a1a5449c5c..8977d1a0d9d1b 100644 --- a/pandas/tests/window/conftest.py +++ b/pandas/tests/window/conftest.py @@ -102,7 +102,7 @@ def engine_and_raw(request): return request.param -@pytest.fixture(params=["1 day", timedelta(days=1)]) +@pytest.fixture(params=["1 day", timedelta(days=1), np.timedelta64(1, "D")]) def halflife_with_times(request): """Halflife argument for EWM when times is specified.""" return request.param diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py index b1e8b43258750..66cd36d121750 100644 --- a/pandas/tests/window/test_ewm.py +++ b/pandas/tests/window/test_ewm.py @@ -90,7 +90,7 @@ def test_ewma_times_not_same_length(): def test_ewma_halflife_not_correct_type(): - msg = "halflife must be a string or datetime.timedelta object" + msg = "halflife must be a timedelta convertible object" with pytest.raises(ValueError, match=msg): Series(range(5)).ewm(halflife=1, times=np.arange(5).astype("datetime64[ns]"))
- [x] closes #47003 (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47026
2022-05-15T01:48:39Z
2022-05-18T13:21:25Z
2022-05-18T13:21:25Z
2022-05-18T16:21:20Z
DEPR: groupby numeric_only default
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 128fd68674f96..af30add139222 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -493,7 +493,8 @@ retained by specifying ``group_keys=False``. ``numeric_only`` default value ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Across the DataFrame operations such as ``min``, ``sum``, and ``idxmax``, the default +Across the DataFrame and DataFrameGroupBy operations such as +``min``, ``sum``, and ``idxmax``, the default value of the ``numeric_only`` argument, if it exists at all, was inconsistent. Furthermore, operations with the default value ``None`` can lead to surprising results. (:issue:`46560`) @@ -523,6 +524,8 @@ gained the ``numeric_only`` argument. - :meth:`DataFrame.cov` - :meth:`DataFrame.idxmin` - :meth:`DataFrame.idxmax` +- :meth:`.DataFrameGroupBy.cummin` +- :meth:`.DataFrameGroupBy.cummax` - :meth:`.DataFrameGroupBy.idxmin` - :meth:`.DataFrameGroupBy.idxmax` - :meth:`.GroupBy.var` diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index f725ae061cedb..2acf5c826eb57 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -28,6 +28,7 @@ from pandas._libs import ( Interval, + lib, reduction as libreduction, ) from pandas._typing import ( @@ -1128,10 +1129,15 @@ def _wrap_applied_output_series( return self._reindex_output(result) def _cython_transform( - self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs + self, + how: str, + numeric_only: bool | lib.NoDefault = lib.no_default, + axis: int = 0, + **kwargs, ) -> DataFrame: assert axis == 0 # handled by caller # TODO: no tests with self.ndim == 1 for DataFrameGroupBy + numeric_only_bool = self._resolve_numeric_only(numeric_only, axis) # With self.axis == 0, we have multi-block tests # e.g. 
test_rank_min_int, test_cython_transform_frame @@ -1139,7 +1145,8 @@ def _cython_transform( # With self.axis == 1, _get_data_to_aggregate does a transpose # so we always have a single block. mgr: Manager2D = self._get_data_to_aggregate() - if numeric_only: + orig_mgr_len = len(mgr) + if numeric_only_bool: mgr = mgr.get_numeric_data(copy=False) def arr_func(bvalues: ArrayLike) -> ArrayLike: @@ -1152,8 +1159,8 @@ def arr_func(bvalues: ArrayLike) -> ArrayLike: res_mgr = mgr.grouped_reduce(arr_func, ignore_failures=True) res_mgr.set_axis(1, mgr.axes[1]) - if len(res_mgr) < len(mgr): - warn_dropping_nuisance_columns_deprecated(type(self), how) + if len(res_mgr) < orig_mgr_len: + warn_dropping_nuisance_columns_deprecated(type(self), how, numeric_only) res_df = self.obj._constructor(res_mgr) if self.axis == 1: @@ -1269,7 +1276,9 @@ def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame: output[i] = sgb.transform(wrapper) except TypeError: # e.g. trying to call nanmean with string values - warn_dropping_nuisance_columns_deprecated(type(self), "transform") + warn_dropping_nuisance_columns_deprecated( + type(self), "transform", numeric_only=False + ) else: inds.append(i) @@ -1559,19 +1568,27 @@ def nunique(self, dropna: bool = True) -> DataFrame: _shared_docs["idxmax"], numeric_only_default="True for axis=0, False for axis=1", ) - def idxmax(self, axis=0, skipna: bool = True, numeric_only: bool | None = None): + def idxmax( + self, + axis=0, + skipna: bool = True, + numeric_only: bool | lib.NoDefault = lib.no_default, + ): axis = DataFrame._get_axis_number(axis) - if numeric_only is None: - numeric_only = None if axis == 0 else False + if numeric_only is lib.no_default: + # Cannot use self._resolve_numeric_only; we must pass None to + # DataFrame.idxmax for backwards compatibility + numeric_only_arg = None if axis == 0 else False + else: + numeric_only_arg = cast(bool, numeric_only) def func(df): - # NB: here we use numeric_only=None, in DataFrame it is 
False GH#38217 res = df._reduce( nanops.nanargmax, "argmax", axis=axis, skipna=skipna, - numeric_only=numeric_only, + numeric_only=numeric_only_arg, ) indices = res._values index = df._get_axis(axis) @@ -1579,25 +1596,35 @@ def func(df): return df._constructor_sliced(result, index=res.index) func.__name__ = "idxmax" - return self._python_apply_general(func, self._obj_with_exclusions) + result = self._python_apply_general(func, self._obj_with_exclusions) + self._maybe_warn_numeric_only_depr("idxmax", result, numeric_only) + return result @doc( _shared_docs["idxmin"], numeric_only_default="True for axis=0, False for axis=1", ) - def idxmin(self, axis=0, skipna: bool = True, numeric_only: bool | None = None): + def idxmin( + self, + axis=0, + skipna: bool = True, + numeric_only: bool | lib.NoDefault = lib.no_default, + ): axis = DataFrame._get_axis_number(axis) - if numeric_only is None: - numeric_only = None if axis == 0 else False + if numeric_only is lib.no_default: + # Cannot use self._resolve_numeric_only; we must pass None to + # DataFrame.idxmin for backwards compatibility + numeric_only_arg = None if axis == 0 else False + else: + numeric_only_arg = cast(bool, numeric_only) def func(df): - # NB: here we use numeric_only=None, in DataFrame it is False GH#46560 res = df._reduce( nanops.nanargmin, "argmin", axis=axis, skipna=skipna, - numeric_only=numeric_only, + numeric_only=numeric_only_arg, ) indices = res._values index = df._get_axis(axis) @@ -1605,7 +1632,9 @@ def func(df): return df._constructor_sliced(result, index=res.index) func.__name__ = "idxmin" - return self._python_apply_general(func, self._obj_with_exclusions) + result = self._python_apply_general(func, self._obj_with_exclusions) + self._maybe_warn_numeric_only_depr("idxmin", result, numeric_only) + return result boxplot = boxplot_frame_groupby diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 70f8e0a752dcb..0203d54e0de86 100644 --- a/pandas/core/groupby/groupby.py 
+++ b/pandas/core/groupby/groupby.py @@ -939,8 +939,15 @@ def wrapper(*args, **kwargs): if kwargs.get("axis", None) is None: kwargs["axis"] = self.axis + numeric_only = kwargs.get("numeric_only", lib.no_default) + def curried(x): - return f(x, *args, **kwargs) + with warnings.catch_warnings(): + # Catch any warnings from dispatch to DataFrame; we'll emit + # a warning for groupby below + match = "The default value of numeric_only " + warnings.filterwarnings("ignore", match, FutureWarning) + return f(x, *args, **kwargs) # preserve the name so we can detect it when calling plot methods, # to avoid duplicates @@ -956,6 +963,13 @@ def curried(x): curried, self._obj_with_exclusions, is_transform=is_transform ) + if self._selected_obj.ndim != 1 and self.axis != 1: + missing = self._obj_with_exclusions.columns.difference(result.columns) + if len(missing) > 0: + warn_dropping_nuisance_columns_deprecated( + type(self), name, numeric_only + ) + if self.grouper.has_dropped_na and is_transform: # result will have dropped rows due to nans, fill with null # and ensure index is ordered same as the input @@ -1223,7 +1237,9 @@ def _wrap_applied_output( ): raise AbstractMethodError(self) - def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool: + def _resolve_numeric_only( + self, numeric_only: bool | lib.NoDefault, axis: int + ) -> bool: """ Determine subclass-specific default value for 'numeric_only'. @@ -1233,6 +1249,8 @@ def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool: Parameters ---------- numeric_only : bool or lib.no_default + axis : int + Axis passed to the groupby op (not self.axis). Returns ------- @@ -1243,7 +1261,7 @@ def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool: # i.e. not explicitly passed by user if self.obj.ndim == 2: # i.e. 
DataFrameGroupBy - numeric_only = True + numeric_only = axis != 1 # GH#42395 GH#43108 GH#43154 # Regression from 1.2.5 to 1.3 caused object columns to be dropped if self.axis: @@ -1253,7 +1271,6 @@ def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool: check = obj._get_numeric_data() if len(obj.columns) and not len(check.columns) and not obj.empty: numeric_only = False - # TODO: v1.4+ Add FutureWarning else: numeric_only = False @@ -1262,6 +1279,27 @@ def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool: # expected "bool") return numeric_only # type: ignore[return-value] + def _maybe_warn_numeric_only_depr( + self, how: str, result: DataFrame | Series, numeric_only: bool | lib.NoDefault + ) -> None: + """Emit warning on numeric_only behavior deprecation when appropriate. + + Parameters + ---------- + how : str + Groupby kernel name. + result : + Result of the groupby operation. + numeric_only : bool or lib.no_default + Argument as passed by user. 
+ """ + if ( + self._obj_with_exclusions.ndim != 1 + and result.ndim > 1 + and len(result.columns) < len(self._obj_with_exclusions.columns) + ): + warn_dropping_nuisance_columns_deprecated(type(self), how, numeric_only) + # ----------------------------------------------------------------- # numba @@ -1522,7 +1560,9 @@ def _python_agg_general(self, func, *args, raise_on_typeerror=False, **kwargs): except TypeError: if raise_on_typeerror: raise - warn_dropping_nuisance_columns_deprecated(type(self), "agg") + warn_dropping_nuisance_columns_deprecated( + type(self), "agg", numeric_only=False + ) continue key = base.OutputKey(label=name, position=idx) @@ -1536,7 +1576,7 @@ def _python_agg_general(self, func, *args, raise_on_typeerror=False, **kwargs): @final def _agg_general( self, - numeric_only: bool = True, + numeric_only: bool | lib.NoDefault = True, min_count: int = -1, *, alias: str, @@ -1598,17 +1638,19 @@ def _cython_agg_general( self, how: str, alt: Callable, - numeric_only: bool, + numeric_only: bool | lib.NoDefault, min_count: int = -1, ignore_failures: bool = True, ): # Note: we never get here with how="ohlc" for DataFrameGroupBy; # that goes through SeriesGroupBy + numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0) data = self._get_data_to_aggregate() is_ser = data.ndim == 1 - if numeric_only: + orig_len = len(data) + if numeric_only_bool: if is_ser and not is_numeric_dtype(self._selected_obj.dtype): # GH#41291 match Series behavior kwd_name = "numeric_only" @@ -1638,8 +1680,8 @@ def array_func(values: ArrayLike) -> ArrayLike: # continue and exclude the block new_mgr = data.grouped_reduce(array_func, ignore_failures=ignore_failures) - if not is_ser and len(new_mgr) < len(data): - warn_dropping_nuisance_columns_deprecated(type(self), how) + if not is_ser and len(new_mgr) < orig_len: + warn_dropping_nuisance_columns_deprecated(type(self), how, numeric_only) res = self._wrap_agged_manager(new_mgr) if is_ser: @@ -1997,7 +2039,7 @@ def mean( 2 
4.0 Name: B, dtype: float64 """ - numeric_only_bool = self._resolve_numeric_only(numeric_only) + numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0) if maybe_use_numba(engine): from pandas.core._numba.kernels import sliding_mean @@ -2007,7 +2049,7 @@ def mean( result = self._cython_agg_general( "mean", alt=lambda x: Series(x).mean(numeric_only=numeric_only_bool), - numeric_only=numeric_only_bool, + numeric_only=numeric_only, ) return result.__finalize__(self.obj, method="groupby") @@ -2031,12 +2073,12 @@ def median(self, numeric_only: bool | lib.NoDefault = lib.no_default): Series or DataFrame Median of values within each group. """ - numeric_only_bool = self._resolve_numeric_only(numeric_only) + numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0) result = self._cython_agg_general( "median", alt=lambda x: Series(x).median(numeric_only=numeric_only_bool), - numeric_only=numeric_only_bool, + numeric_only=numeric_only, ) return result.__finalize__(self.obj, method="groupby") @@ -2092,7 +2134,7 @@ def std( return np.sqrt(self._numba_agg_general(sliding_var, engine_kwargs, ddof)) else: - return self._get_cythonized_result( + result = self._get_cythonized_result( libgroupby.group_var, cython_dtype=np.dtype(np.float64), numeric_only=numeric_only, @@ -2100,6 +2142,8 @@ def std( post_processing=lambda vals, inference: np.sqrt(vals), ddof=ddof, ) + self._maybe_warn_numeric_only_depr("std", result, numeric_only) + return result @final @Substitution(name="groupby") @@ -2153,12 +2197,12 @@ def var( return self._numba_agg_general(sliding_var, engine_kwargs, ddof) else: - numeric_only_bool = self._resolve_numeric_only(numeric_only) + numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0) if ddof == 1: return self._cython_agg_general( "var", alt=lambda x: Series(x).var(ddof=ddof), - numeric_only=numeric_only_bool, + numeric_only=numeric_only, ignore_failures=numeric_only is lib.no_default, ) else: @@ -2193,6 +2237,8 @@ def sem(self, 
ddof: int = 1, numeric_only: bool | lib.NoDefault = lib.no_default Standard error of the mean of values within each group. """ result = self.std(ddof=ddof, numeric_only=numeric_only) + self._maybe_warn_numeric_only_depr("sem", result, numeric_only) + if result.ndim == 1: result /= np.sqrt(self.count()) else: @@ -2253,8 +2299,6 @@ def sum( engine_kwargs, ) else: - numeric_only = self._resolve_numeric_only(numeric_only) - # If we are grouping on categoricals we want unobserved categories to # return zero, rather than the default of NaN which the reindexing in # _agg_general() returns. GH #31422 @@ -2273,8 +2317,6 @@ def sum( def prod( self, numeric_only: bool | lib.NoDefault = lib.no_default, min_count: int = 0 ): - numeric_only = self._resolve_numeric_only(numeric_only) - return self._agg_general( numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod ) @@ -3050,7 +3092,7 @@ def quantile( a 2.0 b 3.0 """ - numeric_only_bool = self._resolve_numeric_only(numeric_only) + numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0) def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, np.dtype | None]: if is_object_dtype(vals): @@ -3153,7 +3195,9 @@ def blk_func(values: ArrayLike) -> ArrayLike: and not is_ser and len(res_mgr.items) != len(mgr.items) ): - warn_dropping_nuisance_columns_deprecated(type(self), "quantile") + warn_dropping_nuisance_columns_deprecated( + type(self), "quantile", numeric_only + ) if len(res_mgr.items) == 0: # re-call grouped_reduce to get the desired exception message @@ -3447,7 +3491,7 @@ def cumsum(self, axis=0, *args, **kwargs): @final @Substitution(name="groupby") @Appender(_common_see_also) - def cummin(self, axis=0, **kwargs): + def cummin(self, axis=0, numeric_only=False, **kwargs): """ Cumulative min for each group. 
@@ -3460,12 +3504,14 @@ def cummin(self, axis=0, **kwargs): f = lambda x: np.minimum.accumulate(x, axis) return self._python_apply_general(f, self._selected_obj, is_transform=True) - return self._cython_transform("cummin", numeric_only=False, skipna=skipna) + return self._cython_transform( + "cummin", numeric_only=numeric_only, skipna=skipna + ) @final @Substitution(name="groupby") @Appender(_common_see_also) - def cummax(self, axis=0, **kwargs): + def cummax(self, axis=0, numeric_only=False, **kwargs): """ Cumulative max for each group. @@ -3478,7 +3524,9 @@ def cummax(self, axis=0, **kwargs): f = lambda x: np.maximum.accumulate(x, axis) return self._python_apply_general(f, self._selected_obj, is_transform=True) - return self._cython_transform("cummax", numeric_only=False, skipna=skipna) + return self._cython_transform( + "cummax", numeric_only=numeric_only, skipna=skipna + ) @final def _get_cythonized_result( @@ -3532,7 +3580,7 @@ def _get_cythonized_result( ------- `Series` or `DataFrame` with filled values """ - numeric_only = self._resolve_numeric_only(numeric_only) + numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0) if post_processing and not callable(post_processing): raise ValueError("'post_processing' must be a callable!") @@ -3601,15 +3649,16 @@ def blk_func(values: ArrayLike) -> ArrayLike: # Operate block-wise instead of column-by-column is_ser = obj.ndim == 1 mgr = self._get_data_to_aggregate() + orig_mgr_len = len(mgr) - if numeric_only: + if numeric_only_bool: mgr = mgr.get_numeric_data() res_mgr = mgr.grouped_reduce(blk_func, ignore_failures=True) - if not is_ser and len(res_mgr.items) != len(mgr.items): + if not is_ser and len(res_mgr.items) != orig_mgr_len: howstr = how.replace("group_", "") - warn_dropping_nuisance_columns_deprecated(type(self), howstr) + warn_dropping_nuisance_columns_deprecated(type(self), howstr, numeric_only) if len(res_mgr.items) == 0: # We re-call grouped_reduce to get the right exception message @@ 
-4155,13 +4204,27 @@ def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiInde return mi -def warn_dropping_nuisance_columns_deprecated(cls, how: str) -> None: - warnings.warn( - "Dropping invalid columns in " - f"{cls.__name__}.{how} is deprecated. " - "In a future version, a TypeError will be raised. " - f"Before calling .{how}, select only columns which " - "should be valid for the function.", - FutureWarning, - stacklevel=find_stack_level(), - ) +def warn_dropping_nuisance_columns_deprecated(cls, how: str, numeric_only) -> None: + if how == "add": + how = "sum" + if numeric_only is not lib.no_default and not numeric_only: + # numeric_only was specified and falsey but still dropped nuisance columns + warnings.warn( + "Dropping invalid columns in " + f"{cls.__name__}.{how} is deprecated. " + "In a future version, a TypeError will be raised. " + f"Before calling .{how}, select only columns which " + "should be valid for the function.", + FutureWarning, + stacklevel=find_stack_level(), + ) + elif numeric_only is lib.no_default: + warnings.warn( + "The default value of numeric_only in " + f"{cls.__name__}.{how} is deprecated. " + "In a future version, numeric_only will default to False. 
" + f"Either specify numeric_only or select only columns which " + "should be valid for the function.", + FutureWarning, + stacklevel=find_stack_level(), + ) diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py index 336865d32167d..711f1835446a5 100644 --- a/pandas/tests/extension/base/groupby.py +++ b/pandas/tests/extension/base/groupby.py @@ -1,5 +1,7 @@ import pytest +from pandas.core.dtypes.common import is_numeric_dtype + import pandas as pd import pandas._testing as tm from pandas.tests.extension.base.base import BaseExtensionTests @@ -96,7 +98,15 @@ def test_in_numeric_groupby(self, data_for_grouping): "C": [1, 1, 1, 1, 1, 1, 1, 1], } ) - result = df.groupby("A").sum().columns + + dtype = data_for_grouping.dtype + if is_numeric_dtype(dtype) or dtype.name == "decimal": + warn = None + else: + warn = FutureWarning + msg = "The default value of numeric_only" + with tm.assert_produces_warning(warn, match=msg): + result = df.groupby("A").sum().columns if data_for_grouping.dtype._is_numeric: expected = pd.Index(["B", "C"]) diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index ba89a76a7f8c2..fedcc0e2a2284 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -1785,7 +1785,9 @@ def test_stack_multiple_bug(self): multi = df.set_index(["DATE", "ID"]) multi.columns.name = "Params" unst = multi.unstack("ID") - down = unst.resample("W-THU").mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + down = unst.resample("W-THU").mean() rs = down.stack("ID") xp = unst.loc[:, ["VAR1"]].resample("W-THU").mean().stack("ID") diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py index 2b248afb42057..b4a3a60e72139 100644 --- a/pandas/tests/generic/test_frame.py +++ b/pandas/tests/generic/test_frame.py @@ -71,7 +71,9 @@ def 
test_metadata_propagation_indiv_groupby(self): "D": np.random.randn(8), } ) - result = df.groupby("A").sum() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").sum() tm.assert_metadata_equivalent(df, result) def test_metadata_propagation_indiv_resample(self): diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index bdb33bff5eadd..37b02571158b9 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -238,7 +238,10 @@ def test_multiindex_groupby_mixed_cols_axis1(func, expected, dtype, result_dtype [[1, 2, 3, 4, 5, 6]] * 3, columns=MultiIndex.from_product([["a", "b"], ["i", "j", "k"]]), ).astype({("a", "j"): dtype, ("b", "j"): dtype}) - result = df.groupby(level=1, axis=1).agg(func) + warn = FutureWarning if func == "std" else None + msg = "The default value of numeric_only" + with tm.assert_produces_warning(warn, match=msg): + result = df.groupby(level=1, axis=1).agg(func) expected = DataFrame([expected] * 3, columns=["i", "j", "k"]).astype( result_dtype_dict ) @@ -262,7 +265,10 @@ def test_groupby_mixed_cols_axis1(func, expected_data, result_dtype_dict): columns=Index([10, 20, 10, 20], name="x"), dtype="int64", ).astype({10: "Int64"}) - result = df.groupby("x", axis=1).agg(func) + warn = FutureWarning if func == "std" else None + msg = "The default value of numeric_only" + with tm.assert_produces_warning(warn, match=msg): + result = df.groupby("x", axis=1).agg(func) expected = DataFrame( data=expected_data, index=Index([0, 1, 0], name="y"), diff --git a/pandas/tests/groupby/test_allowlist.py b/pandas/tests/groupby/test_allowlist.py index 7c64d82608c9e..e541abb368a02 100644 --- a/pandas/tests/groupby/test_allowlist.py +++ b/pandas/tests/groupby/test_allowlist.py @@ -187,7 +187,9 @@ def test_regression_allowlist_methods(raw_frame, op, level, axis, skipna, sort): if 
op in AGG_FUNCTIONS_WITH_SKIPNA: grouped = frame.groupby(level=level, axis=axis, sort=sort) - with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"): + with tm.assert_produces_warning( + warn, match="The 'mad' method is deprecated", raise_on_extra_warnings=False + ): result = getattr(grouped, op)(skipna=skipna) with tm.assert_produces_warning(FutureWarning): expected = getattr(frame, op)(level=level, axis=axis, skipna=skipna) @@ -196,8 +198,8 @@ def test_regression_allowlist_methods(raw_frame, op, level, axis, skipna, sort): tm.assert_frame_equal(result, expected) else: grouped = frame.groupby(level=level, axis=axis, sort=sort) - result = getattr(grouped, op)() with tm.assert_produces_warning(FutureWarning): + result = getattr(grouped, op)() expected = getattr(frame, op)(level=level, axis=axis) if sort: expected = expected.sort_index(axis=axis, level=level) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index abe1b8f13e32e..004e55f4d161f 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -103,7 +103,9 @@ def test_basic(): # TODO: split this test gb = df.groupby("A", observed=False) exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True) expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)}) - result = gb.sum() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.sum() tm.assert_frame_equal(result, expected) # GH 8623 @@ -314,6 +316,7 @@ def test_apply(ordered): tm.assert_series_equal(result, expected) +@pytest.mark.filterwarnings("ignore:.*value of numeric_only.*:FutureWarning") def test_observed(observed): # multiple groupers, don't re-expand the output space # of the grouper @@ -807,8 +810,12 @@ def test_preserve_categorical_dtype(): } ) for col in ["C1", "C2"]: - result1 = df.groupby(by=col, as_index=False, observed=False).mean() - result2 = 
df.groupby(by=col, as_index=True, observed=False).mean().reset_index() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result1 = df.groupby(by=col, as_index=False, observed=False).mean() + result2 = ( + df.groupby(by=col, as_index=True, observed=False).mean().reset_index() + ) expected = exp_full.reindex(columns=result1.columns) tm.assert_frame_equal(result1, expected) tm.assert_frame_equal(result2, expected) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index c99405dfccb66..206d37e1a800e 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -4,6 +4,7 @@ import numpy as np import pytest +from pandas._libs import lib from pandas.errors import UnsupportedFunctionCall import pandas as pd @@ -259,7 +260,9 @@ def _check(self, df, method, expected_columns, expected_columns_numeric): # these have numeric_only kwarg, but default to False warn = FutureWarning - with tm.assert_produces_warning(warn, match="Dropping invalid columns"): + with tm.assert_produces_warning( + warn, match="Dropping invalid columns", raise_on_extra_warnings=False + ): result = getattr(gb, method)() tm.assert_index_equal(result.columns, expected_columns_numeric) @@ -297,24 +300,26 @@ def gni(self, df): return gni # TODO: non-unique columns, as_index=False - @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def test_idxmax(self, gb): # object dtype so idxmax goes through _aggregate_item_by_item # GH#5610 # non-cython calls should not include the grouper expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3]) expected.index.name = "A" - result = gb.idxmax() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.idxmax() tm.assert_frame_equal(result, expected) - @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def test_idxmin(self, gb): # 
object dtype so idxmax goes through _aggregate_item_by_item # GH#5610 # non-cython calls should not include the grouper expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3]) expected.index.name = "A" - result = gb.idxmin() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.idxmin() tm.assert_frame_equal(result, expected) def test_mad(self, gb, gni): @@ -1238,3 +1243,114 @@ def test_groupby_sum_timedelta_with_nat(): res = gb["b"].sum(min_count=2) expected = Series([td3, pd.NaT], dtype="m8[ns]", name="b", index=expected.index) tm.assert_series_equal(res, expected) + + +@pytest.mark.parametrize( + "kernel, numeric_only_default, drops_nuisance, has_arg", + [ + ("all", False, False, False), + ("any", False, False, False), + ("bfill", False, False, False), + ("corr", True, False, True), + ("corrwith", True, False, True), + ("cov", True, False, True), + ("cummax", False, True, True), + ("cummin", False, True, True), + ("cumprod", True, True, True), + ("cumsum", True, True, True), + ("diff", False, False, False), + ("ffill", False, False, False), + ("fillna", False, False, False), + ("first", False, False, True), + ("idxmax", True, False, True), + ("idxmin", True, False, True), + ("last", False, False, True), + ("max", False, True, True), + ("mean", True, True, True), + ("median", True, True, True), + ("min", False, True, True), + ("nth", False, False, False), + ("nunique", False, False, False), + ("pct_change", False, False, False), + ("prod", True, True, True), + ("quantile", True, False, True), + ("sem", True, True, True), + ("skew", True, False, True), + ("std", True, True, True), + ("sum", True, True, True), + ("var", True, False, True), + ], +) +@pytest.mark.parametrize("numeric_only", [True, False, lib.no_default]) +@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]]) +def test_deprecate_numeric_only( + kernel, numeric_only_default, drops_nuisance, has_arg, numeric_only, keys +): 
+ # GH#46072 + # drops_nuisance: Whether the op drops nuisance columns even when numeric_only=False + # has_arg: Whether the op has a numeric_only arg + df = DataFrame({"a1": [1, 1], "a2": [2, 2], "a3": [5, 6], "b": 2 * [object]}) + + if kernel == "corrwith": + args = (df,) + elif kernel == "nth" or kernel == "fillna": + args = (0,) + else: + args = () + kwargs = {} if numeric_only is lib.no_default else {"numeric_only": numeric_only} + + gb = df.groupby(keys) + method = getattr(gb, kernel) + if has_arg and ( + # Cases where b does not appear in the result + numeric_only is True + or (numeric_only is lib.no_default and numeric_only_default) + or drops_nuisance + ): + if numeric_only is True or (not numeric_only_default and not drops_nuisance): + warn = None + else: + warn = FutureWarning + if numeric_only is lib.no_default and numeric_only_default: + msg = f"The default value of numeric_only in DataFrameGroupBy.{kernel}" + else: + msg = f"Dropping invalid columns in DataFrameGroupBy.{kernel}" + with tm.assert_produces_warning(warn, match=msg): + result = method(*args, **kwargs) + + assert "b" not in result.columns + elif ( + # kernels that work on any dtype and have numeric_only arg + kernel in ("first", "last", "corrwith") + or ( + # kernels that work on any dtype and don't have numeric_only arg + kernel in ("any", "all", "bfill", "ffill", "fillna", "nth", "nunique") + and numeric_only is lib.no_default + ) + ): + result = method(*args, **kwargs) + assert "b" in result.columns + elif has_arg: + assert numeric_only is not True + assert numeric_only is not lib.no_default or numeric_only_default is False + assert not drops_nuisance + # kernels that are successful on any dtype were above; this will fail + msg = ( + "(not allowed for this dtype" + "|must be a string or a number" + "|cannot be performed against 'object' dtypes" + "|must be a string or a real number)" + ) + with pytest.raises(TypeError, match=msg): + method(*args, **kwargs) + elif not has_arg and 
numeric_only is not lib.no_default: + with pytest.raises( + TypeError, match="got an unexpected keyword argument 'numeric_only'" + ): + method(*args, **kwargs) + else: + assert kernel in ("diff", "pct_change") + assert numeric_only is lib.no_default + # Doesn't have numeric_only argument and fails on nuisance columns + with pytest.raises(TypeError, match=r"unsupported operand type"): + method(*args, **kwargs) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 016e817e43402..61951292d55a8 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -474,13 +474,17 @@ def test_frame_groupby_columns(tsframe): def test_frame_set_name_single(df): grouped = df.groupby("A") - result = grouped.mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.mean() assert result.index.name == "A" - result = df.groupby("A", as_index=False).mean() + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A", as_index=False).mean() assert result.index.name != "A" - result = grouped.agg(np.mean) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.agg(np.mean) assert result.index.name == "A" result = grouped.agg({"C": np.mean, "D": np.std}) @@ -503,8 +507,10 @@ def test_multi_func(df): col2 = df["B"] grouped = df.groupby([col1.get, col2.get]) - agged = grouped.mean() - expected = df.groupby(["A", "B"]).mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + agged = grouped.mean() + expected = df.groupby(["A", "B"]).mean() # TODO groupby get drops names tm.assert_frame_equal( @@ -661,13 +667,16 @@ def test_groupby_as_index_agg(df): # single-key - result = grouped.agg(np.mean) - expected = grouped.mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = 
grouped.agg(np.mean) + expected = grouped.mean() tm.assert_frame_equal(result, expected) result2 = grouped.agg({"C": np.mean, "D": np.sum}) - expected2 = grouped.mean() - expected2["D"] = grouped.sum()["D"] + with tm.assert_produces_warning(FutureWarning, match=msg): + expected2 = grouped.mean() + expected2["D"] = grouped.sum()["D"] tm.assert_frame_equal(result2, expected2) grouped = df.groupby("A", as_index=True) @@ -754,8 +763,10 @@ def test_as_index_series_return_frame(df): grouped = df.groupby("A", as_index=False) grouped2 = df.groupby(["A", "B"], as_index=False) - result = grouped["C"].agg(np.sum) - expected = grouped.agg(np.sum).loc[:, ["A", "C"]] + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped["C"].agg(np.sum) + expected = grouped.agg(np.sum).loc[:, ["A", "C"]] assert isinstance(result, DataFrame) tm.assert_frame_equal(result, expected) @@ -765,7 +776,8 @@ def test_as_index_series_return_frame(df): tm.assert_frame_equal(result2, expected2) result = grouped["C"].sum() - expected = grouped.sum().loc[:, ["A", "C"]] + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = grouped.sum().loc[:, ["A", "C"]] assert isinstance(result, DataFrame) tm.assert_frame_equal(result, expected) @@ -789,8 +801,10 @@ def test_groupby_as_index_cython(df): # single-key grouped = data.groupby("A", as_index=False) - result = grouped.mean() - expected = data.groupby(["A"]).mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.mean() + expected = data.groupby(["A"]).mean() expected.insert(0, "A", expected.index) expected.index = np.arange(len(expected)) tm.assert_frame_equal(result, expected) @@ -859,15 +873,18 @@ def test_groupby_multi_corner(df): def test_omit_nuisance(df): grouped = df.groupby("A") - agged = grouped.agg(np.mean) - exp = grouped.mean() + msg = "The default value of numeric_only" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + agged = grouped.agg(np.mean) + exp = grouped.mean() tm.assert_frame_equal(agged, exp) df = df.loc[:, ["A", "C", "D"]] df["E"] = datetime.now() grouped = df.groupby("A") - result = grouped.agg(np.sum) - expected = grouped.sum() + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.agg(np.sum) + expected = grouped.sum() tm.assert_frame_equal(result, expected) # won't work with axis = 1 @@ -898,7 +915,7 @@ def test_keep_nuisance_agg(df, agg_function): @pytest.mark.parametrize("numeric_only", [lib.no_default, True, False]) def test_omit_nuisance_agg(df, agg_function, numeric_only): # GH 38774, GH 38815 - if not numeric_only and agg_function != "sum": + if numeric_only is lib.no_default or (not numeric_only and agg_function != "sum"): # sum doesn't drop strings warn = FutureWarning else: @@ -913,7 +930,13 @@ def test_omit_nuisance_agg(df, agg_function, numeric_only): with pytest.raises(klass, match="could not convert string to float"): getattr(grouped, agg_function)(numeric_only=numeric_only) else: - with tm.assert_produces_warning(warn, match="Dropping invalid columns"): + if numeric_only is lib.no_default: + msg = ( + f"The default value of numeric_only in DataFrameGroupBy.{agg_function}" + ) + else: + msg = "Dropping invalid columns" + with tm.assert_produces_warning(warn, match=msg): result = getattr(grouped, agg_function)(numeric_only=numeric_only) if ( (numeric_only is lib.no_default or not numeric_only) @@ -923,9 +946,18 @@ def test_omit_nuisance_agg(df, agg_function, numeric_only): columns = ["A", "B", "C", "D"] else: columns = ["A", "C", "D"] - expected = getattr(df.loc[:, columns].groupby("A"), agg_function)( - numeric_only=numeric_only - ) + if agg_function == "sum" and numeric_only is False: + # sum doesn't drop nuisance string columns + warn = None + elif agg_function in ("sum", "std", "var", "sem") and numeric_only is not True: + warn = FutureWarning + else: + warn = None + 
msg = "The default value of numeric_only" + with tm.assert_produces_warning(warn, match=msg): + expected = getattr(df.loc[:, columns].groupby("A"), agg_function)( + numeric_only=numeric_only + ) tm.assert_frame_equal(result, expected) @@ -941,8 +973,10 @@ def test_omit_nuisance_warnings(df): def test_omit_nuisance_python_multiple(three_group): grouped = three_group.groupby(["A", "B"]) - agged = grouped.agg(np.mean) - exp = grouped.mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + agged = grouped.agg(np.mean) + exp = grouped.mean() tm.assert_frame_equal(agged, exp) @@ -959,8 +993,10 @@ def test_empty_groups_corner(mframe): ) grouped = df.groupby(["k1", "k2"]) - result = grouped.agg(np.mean) - expected = grouped.mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.agg(np.mean) + expected = grouped.mean() tm.assert_frame_equal(result, expected) grouped = mframe[3:5].groupby(level=0) @@ -982,7 +1018,9 @@ def test_wrap_aggregated_output_multindex(mframe): df["baz", "two"] = "peekaboo" keys = [np.array([0, 0, 1]), np.array([0, 0, 1])] - agged = df.groupby(keys).agg(np.mean) + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + agged = df.groupby(keys).agg(np.mean) assert isinstance(agged.columns, MultiIndex) def aggfun(ser): @@ -1143,15 +1181,19 @@ def test_groupby_with_hier_columns(): # add a nuisance column sorted_columns, _ = columns.sortlevel(0) df["A", "foo"] = "bar" - result = df.groupby(level=0).mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(level=0).mean() tm.assert_index_equal(result.columns, df.columns[:-1]) def test_grouping_ndarray(df): grouped = df.groupby(df["A"].values) - result = grouped.sum() - expected = df.groupby("A").sum() + msg = "The default value of numeric_only" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.sum() + expected = df.groupby("A").sum() tm.assert_frame_equal( result, expected, check_names=False ) # Note: no names when grouping by value @@ -1179,8 +1221,10 @@ def test_groupby_wrong_multi_labels(): def test_groupby_series_with_name(df): - result = df.groupby(df["A"]).mean() - result2 = df.groupby(df["A"], as_index=False).mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(df["A"]).mean() + result2 = df.groupby(df["A"], as_index=False).mean() assert result.index.name == "A" assert "A" in result2 @@ -1331,8 +1375,10 @@ def test_groupby_unit64_float_conversion(): def test_groupby_list_infer_array_like(df): - result = df.groupby(list(df["A"])).mean() - expected = df.groupby(df["A"]).mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(list(df["A"])).mean() + expected = df.groupby(df["A"]).mean() tm.assert_frame_equal(result, expected, check_names=False) with pytest.raises(KeyError, match=r"^'foo'$"): @@ -1445,7 +1491,9 @@ def test_groupby_2d_malformed(): d["zeros"] = [0, 0] d["ones"] = [1, 1] d["label"] = ["l1", "l2"] - tmp = d.groupby(["group"]).mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + tmp = d.groupby(["group"]).mean() res_values = np.array([[0.0, 1.0], [0.0, 1.0]]) tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"])) tm.assert_numpy_array_equal(tmp.values, res_values) @@ -1611,10 +1659,13 @@ def f(group): def test_no_dummy_key_names(df): # see gh-1291 - result = df.groupby(df["A"].values).sum() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(df["A"].values).sum() assert result.index.name is None - result = df.groupby([df["A"].values, df["B"].values]).sum() + with 
tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby([df["A"].values, df["B"].values]).sum() assert result.index.names == (None, None) @@ -2634,7 +2685,9 @@ def test_groupby_aggregation_numeric_with_non_numeric_dtype(): ) gb = df.groupby(by=["x"]) - result = gb.sum() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.sum() tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py index 54cde30ceac92..b665843728165 100644 --- a/pandas/tests/groupby/test_groupby_subclass.py +++ b/pandas/tests/groupby/test_groupby_subclass.py @@ -112,5 +112,7 @@ def test_groupby_resample_preserves_subclass(obj): df = df.set_index("Date") # Confirm groupby.resample() preserves dataframe type - result = df.groupby("Buyer").resample("5D").sum() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("Buyer").resample("5D").sum() assert isinstance(result, obj) diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index c6e4bec3f7b2c..85602fdf7274a 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -59,8 +59,10 @@ def test_column_select_via_attr(self, df): tm.assert_series_equal(result, expected) df["mean"] = 1.5 - result = df.groupby("A").mean() - expected = df.groupby("A").agg(np.mean) + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").mean() + expected = df.groupby("A").agg(np.mean) tm.assert_frame_equal(result, expected) def test_getitem_list_of_columns(self): @@ -284,25 +286,30 @@ def test_grouper_column_and_index(self): {"A": np.arange(6), "B": ["one", "one", "two", "two", "one", "one"]}, index=idx, ) - result = df_multi.groupby(["B", pd.Grouper(level="inner")]).mean() - 
expected = df_multi.reset_index().groupby(["B", "inner"]).mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df_multi.groupby(["B", pd.Grouper(level="inner")]).mean() + expected = df_multi.reset_index().groupby(["B", "inner"]).mean() tm.assert_frame_equal(result, expected) # Test the reverse grouping order - result = df_multi.groupby([pd.Grouper(level="inner"), "B"]).mean() - expected = df_multi.reset_index().groupby(["inner", "B"]).mean() + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df_multi.groupby([pd.Grouper(level="inner"), "B"]).mean() + expected = df_multi.reset_index().groupby(["inner", "B"]).mean() tm.assert_frame_equal(result, expected) # Grouping a single-index frame by a column and the index should # be equivalent to resetting the index and grouping by two columns df_single = df_multi.reset_index("outer") - result = df_single.groupby(["B", pd.Grouper(level="inner")]).mean() - expected = df_single.reset_index().groupby(["B", "inner"]).mean() + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df_single.groupby(["B", pd.Grouper(level="inner")]).mean() + expected = df_single.reset_index().groupby(["B", "inner"]).mean() tm.assert_frame_equal(result, expected) # Test the reverse grouping order - result = df_single.groupby([pd.Grouper(level="inner"), "B"]).mean() - expected = df_single.reset_index().groupby(["inner", "B"]).mean() + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df_single.groupby([pd.Grouper(level="inner"), "B"]).mean() + expected = df_single.reset_index().groupby(["inner", "B"]).mean() tm.assert_frame_equal(result, expected) def test_groupby_levels_and_columns(self): @@ -376,8 +383,10 @@ def test_empty_groups(self, df): def test_groupby_grouper(self, df): grouped = df.groupby("A") - result = df.groupby(grouped.grouper).mean() - expected = grouped.mean() + msg = "The default value of numeric_only" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(grouped.grouper).mean() + expected = grouped.mean() tm.assert_frame_equal(result, expected) def test_groupby_dict_mapping(self): diff --git a/pandas/tests/groupby/test_index_as_string.py b/pandas/tests/groupby/test_index_as_string.py index 971a447b84cae..501a21981a148 100644 --- a/pandas/tests/groupby/test_index_as_string.py +++ b/pandas/tests/groupby/test_index_as_string.py @@ -47,8 +47,11 @@ def series(): ], ) def test_grouper_index_level_as_string(frame, key_strs, groupers): - result = frame.groupby(key_strs).mean() - expected = frame.groupby(groupers).mean() + warn = FutureWarning if "B" not in key_strs or "outer" in frame.columns else None + msg = "The default value of numeric_only" + with tm.assert_produces_warning(warn, match=msg): + result = frame.groupby(key_strs).mean() + expected = frame.groupby(groupers).mean() tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_pipe.py b/pandas/tests/groupby/test_pipe.py index 1229251f88c7d..4f58bcb5ee763 100644 --- a/pandas/tests/groupby/test_pipe.py +++ b/pandas/tests/groupby/test_pipe.py @@ -60,7 +60,9 @@ def f(dfgb, arg1): ) def g(dfgb, arg2): - return dfgb.sum() / dfgb.sum().sum() + arg2 + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + return dfgb.sum() / dfgb.sum().sum() + arg2 def h(df, arg3): return df.x + df.y - arg3 diff --git a/pandas/tests/groupby/test_quantile.py b/pandas/tests/groupby/test_quantile.py index 0f7e71c99584d..20328426a69b2 100644 --- a/pandas/tests/groupby/test_quantile.py +++ b/pandas/tests/groupby/test_quantile.py @@ -246,9 +246,10 @@ def test_groupby_quantile_nullable_array(values, q): def test_groupby_quantile_skips_invalid_dtype(q, numeric_only): df = DataFrame({"a": [1], "b": [2.0], "c": ["x"]}) - if numeric_only is None or numeric_only: + if numeric_only is lib.no_default or numeric_only: warn = FutureWarning if numeric_only is 
lib.no_default else None - with tm.assert_produces_warning(warn, match="Dropping invalid columns"): + msg = "The default value of numeric_only in DataFrameGroupBy.quantile" + with tm.assert_produces_warning(warn, match=msg): result = df.groupby("a").quantile(q, numeric_only=numeric_only) expected = df.groupby("a")[["b"]].quantile(q) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index 7c9d6e7a73087..ae725cbb2b588 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -105,14 +105,18 @@ def test_groupby_with_timegrouper(self): ) expected.iloc[[0, 6, 18], 0] = np.array([24, 6, 9], dtype="int64") - result1 = df.resample("5D").sum() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result1 = df.resample("5D").sum() tm.assert_frame_equal(result1, expected) df_sorted = df.sort_index() - result2 = df_sorted.groupby(Grouper(freq="5D")).sum() + with tm.assert_produces_warning(FutureWarning, match=msg): + result2 = df_sorted.groupby(Grouper(freq="5D")).sum() tm.assert_frame_equal(result2, expected) - result3 = df.groupby(Grouper(freq="5D")).sum() + with tm.assert_produces_warning(FutureWarning, match=msg): + result3 = df.groupby(Grouper(freq="5D")).sum() tm.assert_frame_equal(result3, expected) @pytest.mark.parametrize("should_sort", [True, False]) @@ -186,7 +190,9 @@ def test_timegrouper_with_reg_groups(self): } ).set_index(["Date", "Buyer"]) - result = df.groupby([Grouper(freq="A"), "Buyer"]).sum() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby([Grouper(freq="A"), "Buyer"]).sum() tm.assert_frame_equal(result, expected) expected = DataFrame( @@ -201,7 +207,8 @@ def test_timegrouper_with_reg_groups(self): ], } ).set_index(["Date", "Buyer"]) - result = df.groupby([Grouper(freq="6MS"), "Buyer"]).sum() + with 
tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby([Grouper(freq="6MS"), "Buyer"]).sum() tm.assert_frame_equal(result, expected) df_original = DataFrame( @@ -239,10 +246,13 @@ def test_timegrouper_with_reg_groups(self): } ).set_index(["Date", "Buyer"]) - result = df.groupby([Grouper(freq="1D"), "Buyer"]).sum() + warn_msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.groupby([Grouper(freq="1D"), "Buyer"]).sum() tm.assert_frame_equal(result, expected) - result = df.groupby([Grouper(freq="1M"), "Buyer"]).sum() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.groupby([Grouper(freq="1M"), "Buyer"]).sum() expected = DataFrame( { "Buyer": "Carl Joe Mark".split(), @@ -258,7 +268,8 @@ def test_timegrouper_with_reg_groups(self): # passing the name df = df.reset_index() - result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum() tm.assert_frame_equal(result, expected) with pytest.raises(KeyError, match="'The grouper name foo is not found'"): @@ -266,9 +277,11 @@ def test_timegrouper_with_reg_groups(self): # passing the level df = df.set_index("Date") - result = df.groupby([Grouper(freq="1M", level="Date"), "Buyer"]).sum() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.groupby([Grouper(freq="1M", level="Date"), "Buyer"]).sum() tm.assert_frame_equal(result, expected) - result = df.groupby([Grouper(freq="1M", level=0), "Buyer"]).sum() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.groupby([Grouper(freq="1M", level=0), "Buyer"]).sum() tm.assert_frame_equal(result, expected) with pytest.raises(ValueError, match="The level foo is not valid"): @@ -277,7 +290,8 @@ def test_timegrouper_with_reg_groups(self): # multi names df = df.copy() df["Date"] = 
df.index + offsets.MonthEnd(2) - result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum() expected = DataFrame( { "Buyer": "Carl Joe Mark".split(), @@ -306,18 +320,22 @@ def test_timegrouper_with_reg_groups(self): [datetime(2013, 10, 31, 0, 0)], freq=offsets.MonthEnd(), name="Date" ), ) - result = df.groupby(Grouper(freq="1M")).sum() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.groupby(Grouper(freq="1M")).sum() tm.assert_frame_equal(result, expected) - result = df.groupby([Grouper(freq="1M")]).sum() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.groupby([Grouper(freq="1M")]).sum() tm.assert_frame_equal(result, expected) expected.index = expected.index.shift(1) assert expected.index.freq == offsets.MonthEnd() - result = df.groupby(Grouper(freq="1M", key="Date")).sum() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.groupby(Grouper(freq="1M", key="Date")).sum() tm.assert_frame_equal(result, expected) - result = df.groupby([Grouper(freq="1M", key="Date")]).sum() + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = df.groupby([Grouper(freq="1M", key="Date")]).sum() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("freq", ["D", "M", "A", "Q-APR"]) diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index 0492b143eaf1f..b325edaf2b1ea 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -203,15 +203,24 @@ def test_transform_axis_1_reducer(request, reduction_func): ): marker = pytest.mark.xfail(reason="transform incorrectly fails - GH#45986") request.node.add_marker(marker) - warn = FutureWarning if reduction_func == "mad" else None + if reduction_func == "mad": + 
warn = FutureWarning + msg = "The 'mad' method is deprecated" + elif reduction_func in ("sem", "std"): + warn = FutureWarning + msg = "The default value of numeric_only" + else: + warn = None + msg = "" df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"]) - with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"): + with tm.assert_produces_warning(warn, match=msg): result = df.groupby([0, 0, 1], axis=1).transform(reduction_func) if reduction_func == "size": # size doesn't behave in the same manner; hardcode expected result expected = DataFrame(2 * [[2, 2, 1]], index=df.index, columns=df.columns) else: + warn = FutureWarning if reduction_func == "mad" else None with tm.assert_produces_warning(warn, match="The 'mad' method is deprecated"): expected = df.T.groupby([0, 0, 1]).transform(reduction_func).T tm.assert_equal(result, expected) @@ -462,8 +471,10 @@ def test_transform_exclude_nuisance(df): def test_transform_function_aliases(df): - result = df.groupby("A").transform("mean") - expected = df.groupby("A").transform(np.mean) + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").transform("mean") + expected = df.groupby("A").transform(np.mean) tm.assert_frame_equal(result, expected) result = df.groupby("A")["C"].transform("mean") @@ -774,8 +785,15 @@ def test_cython_transform_frame(op, args, targop): expected = gb.apply(targop) expected = expected.sort_index(axis=1) - tm.assert_frame_equal(expected, gb.transform(op, *args).sort_index(axis=1)) - tm.assert_frame_equal(expected, getattr(gb, op)(*args).sort_index(axis=1)) + + warn = None if op == "shift" else FutureWarning + msg = "The default value of numeric_only" + with tm.assert_produces_warning(warn, match=msg): + result = gb.transform(op, *args).sort_index(axis=1) + tm.assert_frame_equal(result, expected) + with tm.assert_produces_warning(warn, match=msg): + result = getattr(gb, 
op)(*args).sort_index(axis=1) + tm.assert_frame_equal(result, expected) # individual columns for c in df: if ( diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py index b5bae4759090a..21ef078bcf418 100644 --- a/pandas/tests/resample/test_resample_api.py +++ b/pandas/tests/resample/test_resample_api.py @@ -90,9 +90,10 @@ def test_groupby_resample_on_api(): } ) - expected = df.set_index("dates").groupby("key").resample("D").mean() - - result = df.groupby("key").resample("D", on="dates").mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.set_index("dates").groupby("key").resample("D").mean() + result = df.groupby("key").resample("D", on="dates").mean() tm.assert_frame_equal(result, expected) @@ -196,7 +197,9 @@ def tests_skip_nuisance(test_frame): tm.assert_frame_equal(result, expected) expected = r[["A", "B", "C"]].sum() - result = r.sum() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = r.sum() tm.assert_frame_equal(result, expected) @@ -643,10 +646,15 @@ def test_selection_api_validation(): exp = df_exp.resample("2D").sum() exp.index.name = "date" - tm.assert_frame_equal(exp, df.resample("2D", on="date").sum()) + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.resample("2D", on="date").sum() + tm.assert_frame_equal(exp, result) exp.index.name = "d" - tm.assert_frame_equal(exp, df.resample("2D", level="d").sum()) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.resample("2D", level="d").sum() + tm.assert_frame_equal(exp, result) @pytest.mark.parametrize( @@ -809,9 +817,13 @@ def test_frame_downsample_method(method, numeric_only, expected_data): func = getattr(resampled, method) if method == "prod" and numeric_only is not True: warn = FutureWarning + msg = "Dropping invalid columns in 
DataFrameGroupBy.prod is deprecated" + elif method == "sum" and numeric_only is lib.no_default: + warn = FutureWarning + msg = "The default value of numeric_only in DataFrameGroupBy.sum is deprecated" else: warn = None - msg = "Dropping invalid columns in DataFrameGroupBy.prod is deprecated" + msg = "" with tm.assert_produces_warning(warn, match=msg): result = func(numeric_only=numeric_only) diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index cae2d77dfbd3f..5392ec88544a1 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -408,7 +408,9 @@ def test_resample_groupby_agg(): df["date"] = pd.to_datetime(df["date"]) resampled = df.groupby("cat").resample("Y", on="date") - expected = resampled.sum() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = resampled.sum() result = resampled.agg({"num": "sum"}) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 5d6df078ee8c3..905c2af2d22a5 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -553,7 +553,9 @@ def test_mixed_type_join_with_suffix(self): df.insert(5, "dt", "foo") grouped = df.groupby("id") - mn = grouped.mean() + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + mn = grouped.mean() cn = grouped.count() # it works! 
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 31f720b9ec336..0d3b9f4561b55 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -146,8 +146,10 @@ def test_pivot_table_nocols(self): df = DataFrame( {"rows": ["a", "b", "c"], "cols": ["x", "y", "z"], "values": [1, 2, 3]} ) - rs = df.pivot_table(columns="cols", aggfunc=np.sum) - xp = df.pivot_table(index="cols", aggfunc=np.sum).T + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + rs = df.pivot_table(columns="cols", aggfunc=np.sum) + xp = df.pivot_table(index="cols", aggfunc=np.sum).T tm.assert_frame_equal(rs, xp) rs = df.pivot_table(columns="cols", aggfunc={"values": "mean"}) @@ -903,12 +905,19 @@ def test_no_col(self): # to help with a buglet self.data.columns = [k * 2 for k in self.data.columns] - table = self.data.pivot_table(index=["AA", "BB"], margins=True, aggfunc=np.mean) + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + table = self.data.pivot_table( + index=["AA", "BB"], margins=True, aggfunc=np.mean + ) for value_col in table.columns: totals = table.loc[("All", ""), value_col] assert totals == self.data[value_col].mean() - table = self.data.pivot_table(index=["AA", "BB"], margins=True, aggfunc="mean") + with tm.assert_produces_warning(FutureWarning, match=msg): + table = self.data.pivot_table( + index=["AA", "BB"], margins=True, aggfunc="mean" + ) for item in ["DD", "EE", "FF"]: totals = table.loc[("All", ""), item] assert totals == self.data[item].mean() @@ -964,7 +973,9 @@ def test_margin_with_only_columns_defined( } ) - result = df.pivot_table(columns=columns, margins=True, aggfunc=aggfunc) + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.pivot_table(columns=columns, margins=True, aggfunc=aggfunc) expected = DataFrame(values, index=Index(["D", 
"E"]), columns=expected_columns) tm.assert_frame_equal(result, expected) @@ -1990,8 +2001,11 @@ def test_pivot_string_as_func(self): def test_pivot_string_func_vs_func(self, f, f_numpy): # GH #18713 # for consistency purposes - result = pivot_table(self.data, index="A", columns="B", aggfunc=f) - expected = pivot_table(self.data, index="A", columns="B", aggfunc=f_numpy) + + msg = "The default value of numeric_only" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pivot_table(self.data, index="A", columns="B", aggfunc=f) + expected = pivot_table(self.data, index="A", columns="B", aggfunc=f_numpy) tm.assert_frame_equal(result, expected) @pytest.mark.slow
- [x] closes #46072 (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. There are two cases we want to emit a deprecation warning for DataFrameGroupBy: - numeric_only is not specified and columns get dropped. In this case emit a warning that the default of numeric_only will change to False in the future. - numeric_only is specified to False and columns still get dropped. In this case emit a warning that the op will raise in the future.
https://api.github.com/repos/pandas-dev/pandas/pulls/47025
2022-05-14T16:04:46Z
2022-05-18T12:54:44Z
2022-05-18T12:54:43Z
2022-05-19T02:28:46Z
DOC: Fix some typos in pandas/.
diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h index 5b5995a671b2c..71df0c5a186b7 100644 --- a/pandas/_libs/src/ujson/lib/ultrajson.h +++ b/pandas/_libs/src/ujson/lib/ultrajson.h @@ -29,7 +29,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) https://github.com/client9/stringencoders Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. -Numeric decoder derived from from TCL library +Numeric decoder derived from TCL library https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms * Copyright (c) 1988-1993 The Regents of the University of California. * Copyright (c) 1994 Sun Microsystems, Inc. diff --git a/pandas/_libs/src/ujson/lib/ultrajsondec.c b/pandas/_libs/src/ujson/lib/ultrajsondec.c index fee552672b8b6..c7779b8b428ae 100644 --- a/pandas/_libs/src/ujson/lib/ultrajsondec.c +++ b/pandas/_libs/src/ujson/lib/ultrajsondec.c @@ -32,7 +32,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. -Numeric decoder derived from from TCL library +Numeric decoder derived from TCL library https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms * Copyright (c) 1988-1993 The Regents of the University of California. * Copyright (c) 1994 Sun Microsystems, Inc. diff --git a/pandas/_libs/src/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/ujson/lib/ultrajsonenc.c index 4469631b7b3f7..5d90710441a94 100644 --- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c +++ b/pandas/_libs/src/ujson/lib/ultrajsonenc.c @@ -32,7 +32,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. 
-Numeric decoder derived from from TCL library +Numeric decoder derived from TCL library https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms * Copyright (c) 1988-1993 The Regents of the University of California. * Copyright (c) 1994 Sun Microsystems, Inc. diff --git a/pandas/_libs/src/ujson/python/JSONtoObj.c b/pandas/_libs/src/ujson/python/JSONtoObj.c index 14683f4c28cbe..c58f25b8f99ea 100644 --- a/pandas/_libs/src/ujson/python/JSONtoObj.c +++ b/pandas/_libs/src/ujson/python/JSONtoObj.c @@ -29,7 +29,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) https://github.com/client9/stringencoders Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. -Numeric decoder derived from from TCL library +Numeric decoder derived from TCL library https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms * Copyright (c) 1988-1993 The Regents of the University of California. * Copyright (c) 1994 Sun Microsystems, Inc. diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 7de47749e500c..73d2a1f786f8b 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -30,7 +30,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. -Numeric decoder derived from from TCL library +Numeric decoder derived from TCL library https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms * Copyright (c) 1988-1993 The Regents of the University of California. * Copyright (c) 1994 Sun Microsystems, Inc. 
diff --git a/pandas/_libs/src/ujson/python/ujson.c b/pandas/_libs/src/ujson/python/ujson.c index a8fdb4f55bfca..def06cdf2db84 100644 --- a/pandas/_libs/src/ujson/python/ujson.c +++ b/pandas/_libs/src/ujson/python/ujson.c @@ -29,7 +29,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) https://github.com/client9/stringencoders Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. -Numeric decoder derived from from TCL library +Numeric decoder derived from TCL library https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms * Copyright (c) 1988-1993 The Regents of the University of California. * Copyright (c) 1994 Sun Microsystems, Inc. diff --git a/pandas/_libs/src/ujson/python/version.h b/pandas/_libs/src/ujson/python/version.h index 3f38642b6df87..15c55309d6270 100644 --- a/pandas/_libs/src/ujson/python/version.h +++ b/pandas/_libs/src/ujson/python/version.h @@ -29,7 +29,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) https://github.com/client9/stringencoders Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. -Numeric decoder derived from from TCL library +Numeric decoder derived from TCL library https://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms * Copyright (c) 1988-1993 The Regents of the University of California. * Copyright (c) 1994 Sun Microsystems, Inc. 
diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py index d2e6b6e935ed5..84160344437b5 100644 --- a/pandas/core/array_algos/putmask.py +++ b/pandas/core/array_algos/putmask.py @@ -1,5 +1,5 @@ """ -EA-compatible analogue to to np.putmask +EA-compatible analogue to np.putmask """ from __future__ import annotations diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index e015a3e5a941a..752ce28c58f55 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -720,7 +720,7 @@ def _values(self) -> np.ndarray: if isinstance(vals, ABCDatetimeIndex): # TODO: this can be removed after Timestamp.freq is removed # The astype(object) below does not remove the freq from - # the underlying Timestamps so we remove it here to to match + # the underlying Timestamps so we remove it here to match # the behavior of self._get_level_values vals = vals.copy() vals.freq = None diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index ded525cd099fc..2e638f5b0fb3d 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1892,7 +1892,7 @@ def create_block_manager_from_blocks( # If verify_integrity=False, then caller is responsible for checking # all(x.shape[-1] == len(axes[1]) for x in blocks) # sum(x.shape[0] for x in blocks) == len(axes[0]) - # set(x for for blk in blocks for x in blk.mgr_locs) == set(range(len(axes[0]))) + # set(x for blk in blocks for x in blk.mgr_locs) == set(range(len(axes[0]))) # all(blk.ndim == 2 for blk in blocks) # This allows us to safely pass verify_integrity=False diff --git a/pandas/tests/indexes/multi/conftest.py b/pandas/tests/indexes/multi/conftest.py index 9d0a2fa81b53b..3cc4fa4713831 100644 --- a/pandas/tests/indexes/multi/conftest.py +++ b/pandas/tests/indexes/multi/conftest.py @@ -8,7 +8,7 @@ ) -# Note: identical the the "multi" entry in the top-level "index" fixture +# Note: identical the "multi" entry in the 
top-level "index" fixture @pytest.fixture def idx(): # a MultiIndex used to test the general functionality of the diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 426192ab46914..19ea6753c616c 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -1180,7 +1180,7 @@ def test_iloc_getitem_int_single_ea_block_view(self): arr = interval_range(1, 10.0)._values df = DataFrame(arr) - # ser should be a *view* on the the DataFrame data + # ser should be a *view* on the DataFrame data ser = df.iloc[2] # if we have a view, then changing arr[2] should also change ser[0] diff --git a/pandas/tests/io/formats/style/test_matplotlib.py b/pandas/tests/io/formats/style/test_matplotlib.py index a350b6fe7546d..8d9f075d8674d 100644 --- a/pandas/tests/io/formats/style/test_matplotlib.py +++ b/pandas/tests/io/formats/style/test_matplotlib.py @@ -216,7 +216,7 @@ def test_background_gradient_gmap_array_raises(gmap, axis): ], ) def test_background_gradient_gmap_dataframe_align(styler_blank, gmap, subset, exp_gmap): - # test gmap given as DataFrame that it aligns to the the data including subset + # test gmap given as DataFrame that it aligns to the data including subset expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap, subset=subset) result = styler_blank.background_gradient(axis=None, gmap=gmap, subset=subset) assert expected._compute().ctx == result._compute().ctx @@ -232,7 +232,7 @@ def test_background_gradient_gmap_dataframe_align(styler_blank, gmap, subset, ex ], ) def test_background_gradient_gmap_series_align(styler_blank, gmap, axis, exp_gmap): - # test gmap given as Series that it aligns to the the data including subset + # test gmap given as Series that it aligns to the data including subset expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap)._compute() result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute() assert expected.ctx == result.ctx
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47022
2022-05-14T12:40:39Z
2022-05-18T13:22:08Z
2022-05-18T13:22:08Z
2022-05-18T13:22:12Z
MAINT: Fix some typos.
diff --git a/LICENSES/ULTRAJSON_LICENSE b/LICENSES/ULTRAJSON_LICENSE index 3b2886eb9cfae..a905fb017d813 100644 --- a/LICENSES/ULTRAJSON_LICENSE +++ b/LICENSES/ULTRAJSON_LICENSE @@ -28,7 +28,7 @@ Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) https://github.com/client9/stringencoders Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. -Numeric decoder derived from from TCL library +Numeric decoder derived from TCL library http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms * Copyright (c) 1988-1993 The Regents of the University of California. * Copyright (c) 1994 Sun Microsystems, Inc. diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py index ff6bb582e1af5..d871f907232f5 100644 --- a/asv_bench/benchmarks/sparse.py +++ b/asv_bench/benchmarks/sparse.py @@ -146,10 +146,10 @@ def setup(self, fill_value): def make_block_array(self, length, num_blocks, block_size, fill_value): arr = np.full(length, fill_value) - indicies = np.random.choice( + indices = np.random.choice( np.arange(0, length, block_size), num_blocks, replace=False ) - for ind in indicies: + for ind in indices: arr[ind : ind + block_size] = np.random.randint(0, 100, block_size) return SparseArray(arr, fill_value=fill_value) diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 4ed71913d7b4d..4f6972f3eaf2a 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -3529,7 +3529,7 @@ See the :ref:`cookbook<cookbook.excel>` for some advanced strategies. **Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.** This is no longer supported, switch to using ``openpyxl`` instead. - Attempting to use the the ``xlwt`` engine will raise a ``FutureWarning`` + Attempting to use the ``xlwt`` engine will raise a ``FutureWarning`` unless the option :attr:`io.excel.xls.writer` is set to ``"xlwt"``. 
While this option is now deprecated and will also raise a ``FutureWarning``, it can be globally set and the warning suppressed. Users are recommended to diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb index 85c552a7d596f..0b2341bef413e 100644 --- a/doc/source/user_guide/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -612,7 +612,7 @@ "source": [ "### Acting on the Index and Column Headers\n", "\n", - "Similar application is acheived for headers by using:\n", + "Similar application is achieved for headers by using:\n", " \n", "- [.applymap_index()][applymapindex] (elementwise): accepts a function that takes a single value and returns a string with the CSS attribute-value pair.\n", "- [.apply_index()][applyindex] (level-wise): accepts a function that takes a Series and returns a Series, or numpy array with an identical shape where each element is a string with a CSS attribute-value pair. This method passes each level of your Index one-at-a-time. To style the index use `axis=0` and to style the column headers use `axis=1`.\n",
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47021
2022-05-14T12:38:56Z
2022-05-15T02:58:33Z
2022-05-15T02:58:33Z
2022-05-15T02:58:39Z
CI: Move 32 bit Linux build to GHA
diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml new file mode 100644 index 0000000000000..500e800a082d9 --- /dev/null +++ b/.github/workflows/32-bit-linux.yml @@ -0,0 +1,43 @@ +name: 32 Bit Linux + +on: + push: + branches: + - main + - 1.4.x + pull_request: + branches: + - main + - 1.4.x + paths-ignore: + - "doc/**" + +jobs: + pytest: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Run 32-bit manylinux2014 Docker Build / Tests + run: | + docker pull quay.io/pypa/manylinux2014_i686 + docker run --platform linux/386 -v $(pwd):/pandas quay.io/pypa/manylinux2014_i686 \ + /bin/bash -xc "cd pandas && \ + /opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \ + . ~/virtualenvs/pandas-dev/bin/activate && \ + python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \ + pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \ + python setup.py build_ext -q -j2 && \ + python -m pip install --no-build-isolation --no-use-pep517 -e . 
&& \ + export PANDAS_CI=1 && \ + pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml" + + - name: Publish test results for Python 3.8-32 bit full Linux + uses: actions/upload-artifact@v3 + with: + name: Test results + path: test-data.xml + if: failure() diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml index 8ca4cce155e96..23a48e567dfe9 100644 --- a/.github/workflows/python-dev.yml +++ b/.github/workflows/python-dev.yml @@ -2,7 +2,7 @@ # Unfreeze(by commentingthe if: false() condition) once the # next Python Dev version has released beta 1 and both Cython and numpy support it # After that Python has released, migrate the workflows to the -# posix GHA workflows/Azure pipelines and "freeze" this file by +# posix GHA workflows and "freeze" this file by # uncommenting the if: false() condition # Feel free to modify this comment as necessary. diff --git a/README.md b/README.md index 2216f59965354..fc3f988dc6809 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,6 @@ [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3509134.svg)](https://doi.org/10.5281/zenodo.3509134) [![Package Status](https://img.shields.io/pypi/status/pandas.svg)](https://pypi.org/project/pandas/) [![License](https://img.shields.io/pypi/l/pandas.svg)](https://github.com/pandas-dev/pandas/blob/main/LICENSE) -[![Azure Build Status](https://dev.azure.com/pandas-dev/pandas/_apis/build/status/pandas-dev.pandas?branch=main)](https://dev.azure.com/pandas-dev/pandas/_build/latest?definitionId=1&branch=main) [![Coverage](https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=main)](https://codecov.io/gh/pandas-dev/pandas) [![Downloads](https://static.pepy.tech/personalized-badge/pandas?period=month&units=international_system&left_color=black&right_color=orange&left_text=PyPI%20downloads%20per%20month)](https://pepy.tech/project/pandas) 
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/pydata/pandas) diff --git a/azure-pipelines.yml b/azure-pipelines.yml deleted file mode 100644 index 37df662df8edc..0000000000000 --- a/azure-pipelines.yml +++ /dev/null @@ -1,50 +0,0 @@ -# Adapted from https://github.com/numba/numba/blob/master/azure-pipelines.yml -trigger: - branches: - include: - - main - - 1.4.x - paths: - exclude: - - 'doc/**' - -pr: - autoCancel: true - branches: - include: - - main - - 1.4.x - -variables: - PYTEST_WORKERS: auto - PYTEST_TARGET: pandas - PATTERN: "not slow and not db and not network and not single_cpu" - PANDAS_CI: 1 - -jobs: -- job: py38_32bit - pool: - vmImage: ubuntu-18.04 - - steps: - # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941 - - script: | - docker pull quay.io/pypa/manylinux2014_i686 - docker run -v $(pwd):/pandas quay.io/pypa/manylinux2014_i686 \ - /bin/bash -xc "cd pandas && \ - /opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \ - . ~/virtualenvs/pandas-dev/bin/activate && \ - python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \ - pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \ - python setup.py build_ext -q -j2 && \ - python -m pip install --no-build-isolation -e . 
&& \ - export PANDAS_CI=1 && \ - pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml" - displayName: 'Run 32-bit manylinux2014 Docker Build / Tests' - - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for Python 3.8-32 bit full Linux' diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst index b0ba275e3d895..fcaa8adcdcae9 100644 --- a/doc/source/development/contributing_codebase.rst +++ b/doc/source/development/contributing_codebase.rst @@ -289,13 +289,11 @@ library. This makes type checkers aware of the type annotations shipped with pan Testing with continuous integration ----------------------------------- -The pandas test suite will run automatically on `GitHub Actions <https://github.com/features/actions/>`__ and -`Azure Pipelines <https://azure.microsoft.com/en-us/services/devops/pipelines/>`__ +The pandas test suite will run automatically on `GitHub Actions <https://github.com/features/actions/>`__ continuous integration services, once your pull request is submitted. However, if you wish to run the test suite on a branch prior to submitting the pull request, then the continuous integration services need to be hooked to your GitHub repository. Instructions are here -for `GitHub Actions <https://docs.github.com/en/actions/>`__ and -`Azure Pipelines <https://docs.microsoft.com/en-us/azure/devops/pipelines/?view=azure-devops>`__. +for `GitHub Actions <https://docs.github.com/en/actions/>`__. A pull-request will be considered for merging when you have an all 'green' build. If any tests are failing, then you will get a red 'X', where you can click through to see the individual failed tests. 
diff --git a/pandas/conftest.py b/pandas/conftest.py index b84d6fc9c2b99..dc03f081388b8 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -658,7 +658,7 @@ def index_with_missing(request): """ # GH 35538. Use deep copy to avoid illusive bug on np-dev - # Azure pipeline that writes into indices_dict despite copy + # GHA pipeline that writes into indices_dict despite copy ind = indices_dict[request.param].copy(deep=True) vals = ind.values if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]: diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index ff31d93947776..522d25205eeb0 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -71,7 +71,7 @@ def s3_base(worker_id): if is_platform_arm() or is_platform_mac() or is_platform_windows(): # NOT RUN on Windows/MacOS/ARM, only Ubuntu # - subprocess in CI can cause timeouts - # - Azure pipelines/Github Actions do not support + # - Github Actions do not support # container services for the above OSs # - CircleCI will probably hit the Docker rate pull limit pytest.skip( diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py index 4cf6306dc39e5..a029c88fa3a7d 100644 --- a/pandas/tests/window/test_numba.py +++ b/pandas/tests/window/test_numba.py @@ -20,7 +20,7 @@ # TODO(GH#44584): Mark these as pytest.mark.single_cpu pytestmark = pytest.mark.skipif( is_ci_environment() and (is_platform_windows() or is_platform_mac()), - reason="On Azure CI, Windows can fail with " + reason="On GHA CI, Windows can fail with " "'Windows fatal exception: stack overflow' " "and MacOS can timeout", ) diff --git a/pandas/tests/window/test_online.py b/pandas/tests/window/test_online.py index ab435a39a497b..b98129e1b07ec 100644 --- a/pandas/tests/window/test_online.py +++ b/pandas/tests/window/test_online.py @@ -17,7 +17,7 @@ # TODO(GH#44584): Mark these as pytest.mark.single_cpu pytestmark = pytest.mark.skipif( is_ci_environment() and (is_platform_windows() or 
is_platform_mac()), - reason="On Azure CI, Windows can fail with " + reason="On GHA CI, Windows can fail with " "'Windows fatal exception: stack overflow' " "and MacOS can timeout", )
- [x] closes #46351 (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature Also removes references to Azure since no builds run there anymore.
https://api.github.com/repos/pandas-dev/pandas/pulls/47020
2022-05-13T21:47:31Z
2022-05-15T02:56:13Z
2022-05-15T02:56:13Z
2022-05-15T13:55:00Z
REF: simplify tzconversion
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index b3a006141fadc..72bc6886b5175 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -58,9 +58,7 @@ ) from pandas._libs.tslibs.timestamps import Timestamp from pandas._libs.tslibs.timezones import tz_compare -from pandas._libs.tslibs.tzconversion import ( - py_tz_convert_from_utc_single as tz_convert_from_utc_single, -) +from pandas._libs.tslibs.tzconversion import tz_convert_from_utc_single from pandas._libs.tslibs.vectorized import ( dt64arr_to_periodarr, get_resolution, diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 6c96df9a7ea0b..d06dc3160995d 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -54,10 +54,7 @@ from pandas._libs.tslibs.ccalendar cimport ( get_firstbday, get_lastbday, ) -from pandas._libs.tslibs.conversion cimport ( - convert_datetime_to_tsobject, - localize_pydatetime, -) +from pandas._libs.tslibs.conversion cimport localize_pydatetime from pandas._libs.tslibs.nattype cimport ( NPY_NAT, c_NaT as NaT, @@ -68,7 +65,6 @@ from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, pydate_to_dtstruct, ) -from pandas._libs.tslibs.tzconversion cimport tz_convert_from_utc_single from .dtypes cimport PeriodDtypeCode from .timedeltas cimport ( @@ -270,10 +266,8 @@ cdef _to_dt64D(dt): if getattr(dt, 'tzinfo', None) is not None: # Get the nanosecond timestamp, # equiv `Timestamp(dt).value` or `dt.timestamp() * 10**9` - nanos = getattr(dt, "nanosecond", 0) - i8 = convert_datetime_to_tsobject(dt, tz=None, nanos=nanos).value - dt = tz_convert_from_utc_single(i8, dt.tzinfo) - dt = np.int64(dt).astype('datetime64[ns]') + naive = dt.astimezone(None) + dt = np.datetime64(naive, "D") else: dt = np.datetime64(dt) if dt.dtype.name != "datetime64[D]": diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index fcc9390a2cccd..abdb4aebb625f 100644 
--- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1979,22 +1979,19 @@ default 'raise' value = tz_localize_to_utc_single(self.value, tz, ambiguous=ambiguous, nonexistent=nonexistent) - out = Timestamp(value, tz=tz) - if out is not NaT: - out._set_freq(self._freq) # avoid warning in constructor - return out + elif tz is None: + # reset tz + value = tz_convert_from_utc_single(self.value, self.tz) + else: - if tz is None: - # reset tz - value = tz_convert_from_utc_single(self.value, self.tz) - out = Timestamp(value, tz=tz) - if out is not NaT: - out._set_freq(self._freq) # avoid warning in constructor - return out - else: - raise TypeError( - "Cannot localize tz-aware Timestamp, use tz_convert for conversions" - ) + raise TypeError( + "Cannot localize tz-aware Timestamp, use tz_convert for conversions" + ) + + out = Timestamp(value, tz=tz) + if out is not NaT: + out._set_freq(self._freq) # avoid warning in constructor + return out def tz_convert(self, tz): """ diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd index 600ac54639dfc..2acad9ea34062 100644 --- a/pandas/_libs/tslibs/tzconversion.pxd +++ b/pandas/_libs/tslibs/tzconversion.pxd @@ -6,8 +6,8 @@ from numpy cimport ( ) -cdef int64_t tz_convert_from_utc_single( - int64_t utc_val, tzinfo tz, bint* fold=?, Py_ssize_t* outpos=? +cpdef int64_t tz_convert_from_utc_single( + int64_t utc_val, tzinfo tz ) except? -1 cdef int64_t tz_localize_to_utc_single( int64_t val, tzinfo tz, object ambiguous=*, object nonexistent=* diff --git a/pandas/_libs/tslibs/tzconversion.pyi b/pandas/_libs/tslibs/tzconversion.pyi index 8647dee712294..2531383b658fc 100644 --- a/pandas/_libs/tslibs/tzconversion.pyi +++ b/pandas/_libs/tslibs/tzconversion.pyi @@ -8,8 +8,8 @@ import numpy as np from pandas._typing import npt -# py_tz_convert_from_utc_single exposed for testing -def py_tz_convert_from_utc_single(val: np.int64, tz: tzinfo) -> np.int64: ... 
+# tz_convert_from_utc_single exposed for testing +def tz_convert_from_utc_single(val: np.int64, tz: tzinfo) -> np.int64: ... def tz_localize_to_utc( vals: npt.NDArray[np.int64], tz: tzinfo | None, diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index fede9768f5fee..0cdc7b777f45f 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -183,29 +183,28 @@ timedelta-like} localized : ndarray[int64_t] """ cdef: - const int64_t[::1] deltas ndarray[uint8_t, cast=True] ambiguous_array - Py_ssize_t i, idx, pos, ntrans, n = vals.shape[0] + Py_ssize_t i, idx, pos, n = vals.shape[0] Py_ssize_t delta_idx_offset, delta_idx, pos_left, pos_right - int64_t *tdata int64_t v, left, right, val, new_local, remaining_mins int64_t first_delta, delta int64_t shift_delta = 0 - ndarray[int64_t] trans, result_a, result_b, dst_hours + ndarray[int64_t] result_a, result_b, dst_hours int64_t[::1] result npy_datetimestruct dts bint infer_dst = False, is_dst = False, fill = False bint shift_forward = False, shift_backward = False bint fill_nonexist = False str stamp + Localizer info = Localizer(tz) # Vectorized version of DstTzInfo.localize - if is_utc(tz) or tz is None: + if info.use_utc: return vals.copy() result = cnp.PyArray_EMPTY(vals.ndim, vals.shape, cnp.NPY_INT64, 0) - if is_tzlocal(tz) or is_zoneinfo(tz): + if info.use_tzlocal: for i in range(n): v = vals[i] if v == NPY_NAT: @@ -214,9 +213,8 @@ timedelta-like} result[i] = v - _tz_localize_using_tzinfo_api(v, tz, to_utc=True) return result.base # to return underlying ndarray - elif is_fixed_offset(tz): - _, deltas, _ = get_dst_info(tz) - delta = deltas[0] + elif info.use_fixed: + delta = info.delta for i in range(n): v = vals[i] if v == NPY_NAT: @@ -259,14 +257,9 @@ timedelta-like} "shift_backwards} or a timedelta object") raise ValueError(msg) - trans, deltas, _ = get_dst_info(tz) - - tdata = <int64_t*>cnp.PyArray_DATA(trans) - ntrans = trans.shape[0] - # 
Determine whether each date lies left of the DST transition (store in # result_a) or right of the DST transition (store in result_b) - result_a, result_b =_get_utc_bounds(vals, tdata, ntrans, deltas) + result_a, result_b =_get_utc_bounds(vals, info.tdata, info.ntrans, info.deltas) # silence false-positive compiler warning dst_hours = np.empty(0, dtype=np.int64) @@ -278,7 +271,7 @@ timedelta-like} # Shift the delta_idx by if the UTC offset of # the target tz is greater than 0 and we're moving forward # or vice versa - first_delta = deltas[0] + first_delta = info.deltas[0] if (shift_forward or shift_delta > 0) and first_delta > 0: delta_idx_offset = 1 elif (shift_backward or shift_delta < 0) and first_delta < 0: @@ -336,10 +329,10 @@ timedelta-like} # nonexistent times new_local = val - remaining_mins - 1 - delta_idx = bisect_right_i8(tdata, new_local, ntrans) + delta_idx = bisect_right_i8(info.tdata, new_local, info.ntrans) delta_idx = delta_idx - delta_idx_offset - result[i] = new_local - deltas[delta_idx] + result[i] = new_local - info.deltas[delta_idx] elif fill_nonexist: result[i] = NPY_NAT else: @@ -519,19 +512,7 @@ cdef ndarray[int64_t] _get_dst_hours( # ---------------------------------------------------------------------- # Timezone Conversion -def py_tz_convert_from_utc_single(int64_t utc_val, tzinfo tz): - # The 'bint* fold=NULL' in tz_convert_from_utc_single means we cannot - # make it cdef, so this is version exposed for testing from python. - return tz_convert_from_utc_single(utc_val, tz) - - -@cython.boundscheck(False) -cdef int64_t tz_convert_from_utc_single( - int64_t utc_val, - tzinfo tz, - bint* fold=NULL, - Py_ssize_t* outpos=NULL, -) except? -1: +cpdef int64_t tz_convert_from_utc_single(int64_t utc_val, tzinfo tz) except? 
-1: """ Convert the val (in i8) from UTC to tz @@ -541,8 +522,6 @@ cdef int64_t tz_convert_from_utc_single( ---------- utc_val : int64 tz : tzinfo - fold : bint*, default NULL - outpos : Py_ssize_t*, default NULL Returns ------- @@ -552,13 +531,8 @@ cdef int64_t tz_convert_from_utc_single( Localizer info = Localizer(tz) Py_ssize_t pos - if utc_val == NPY_NAT: - return utc_val - - if outpos is not NULL and info.use_pytz: - return info.utc_val_to_local_val(utc_val, outpos, fold) - else: - return info.utc_val_to_local_val(utc_val, &pos, fold) + # Note: caller is responsible for ensuring utc_val != NPY_NAT + return info.utc_val_to_local_val(utc_val, &pos) # OSError may be thrown by tzlocal on windows at or close to 1970-01-01 diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py index 99be0e63d58e2..ade5a2077767f 100644 --- a/pandas/tests/tslibs/test_conversion.py +++ b/pandas/tests/tslibs/test_conversion.py @@ -22,7 +22,7 @@ def _compare_utc_to_local(tz_didx): def f(x): - return tzconversion.py_tz_convert_from_utc_single(x, tz_didx.tz) + return tzconversion.tz_convert_from_utc_single(x, tz_didx.tz) result = tz_convert_from_utc(tz_didx.asi8, tz_didx.tz) expected = np.vectorize(f)(tz_didx.asi8)
Broken off branch implementing tzconversion for non-nano
https://api.github.com/repos/pandas-dev/pandas/pulls/47019
2022-05-13T20:54:57Z
2022-05-15T02:55:22Z
2022-05-15T02:55:22Z
2022-05-15T16:10:55Z
REF: merge datetime_to_datetime64 into array_to_datetime
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 9492888e7db77..e6bbf52ab1272 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -59,6 +59,7 @@ from pandas._libs.tslibs.nattype cimport ( c_nat_strings as nat_strings, ) from pandas._libs.tslibs.timestamps cimport _Timestamp +from pandas._libs.tslibs.timezones cimport tz_compare from pandas._libs.tslibs import ( Resolution, @@ -447,6 +448,7 @@ cpdef array_to_datetime( bint string_to_dts_failed datetime py_dt tzinfo tz_out = None + bint found_tz = False, found_naive = False # specify error conditions assert is_raise or is_ignore or is_coerce @@ -465,18 +467,34 @@ cpdef array_to_datetime( elif PyDateTime_Check(val): seen_datetime = True if val.tzinfo is not None: + found_tz = True if utc_convert: _ts = convert_datetime_to_tsobject(val, None) iresult[i] = _ts.value - else: + elif found_naive: raise ValueError('Tz-aware datetime.datetime ' 'cannot be converted to ' 'datetime64 unless utc=True') - elif isinstance(val, _Timestamp): - iresult[i] = val.value + elif tz_out is not None and not tz_compare(tz_out, val.tzinfo): + raise ValueError('Tz-aware datetime.datetime ' + 'cannot be converted to ' + 'datetime64 unless utc=True') + else: + found_tz = True + tz_out = val.tzinfo + _ts = convert_datetime_to_tsobject(val, None) + iresult[i] = _ts.value + else: - iresult[i] = pydatetime_to_dt64(val, &dts) - check_dts_bounds(&dts) + found_naive = True + if found_tz: + raise ValueError('Cannot mix tz-aware with ' + 'tz-naive values') + if isinstance(val, _Timestamp): + iresult[i] = val.value + else: + iresult[i] = pydatetime_to_dt64(val, &dts) + check_dts_bounds(&dts) elif PyDate_Check(val): seen_datetime = True diff --git a/pandas/_libs/tslibs/conversion.pyi b/pandas/_libs/tslibs/conversion.pyi index ca6f301673f33..16fe853eef815 100644 --- a/pandas/_libs/tslibs/conversion.pyi +++ b/pandas/_libs/tslibs/conversion.pyi @@ -23,7 +23,4 @@ def ensure_timedelta64ns( arr: np.ndarray, # 
np.ndarray[timedelta64[ANY]] copy: bool = ..., ) -> np.ndarray: ... # np.ndarray[timedelta64ns] -def datetime_to_datetime64( - values: npt.NDArray[np.object_], -) -> tuple[np.ndarray, tzinfo | None]: ... # (np.ndarray[dt64ns], _) def localize_pydatetime(dt: datetime, tz: tzinfo | None) -> datetime: ... diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index e5217259a3648..5b7da7347a238 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -264,80 +264,6 @@ def ensure_timedelta64ns(arr: ndarray, copy: bool = True): return dt64_result.view(TD64NS_DTYPE) -# ---------------------------------------------------------------------- - - -@cython.boundscheck(False) -@cython.wraparound(False) -def datetime_to_datetime64(ndarray values): - # ndarray[object], but can't declare object without ndim - """ - Convert ndarray of datetime-like objects to int64 array representing - nanosecond timestamps. - - Parameters - ---------- - values : ndarray[object] - - Returns - ------- - result : ndarray[datetime64ns] - inferred_tz : tzinfo or None - """ - cdef: - Py_ssize_t i, n = values.size - object val - int64_t ival - ndarray iresult # int64_t, but can't declare that without specifying ndim - npy_datetimestruct dts - _TSObject _ts - bint found_naive = False - tzinfo inferred_tz = None - - cnp.broadcast mi - - result = np.empty((<object>values).shape, dtype='M8[ns]') - iresult = result.view('i8') - - mi = cnp.PyArray_MultiIterNew2(iresult, values) - for i in range(n): - # Analogous to: val = values[i] - val = <object>(<PyObject**>cnp.PyArray_MultiIter_DATA(mi, 1))[0] - - if checknull_with_nat(val): - ival = NPY_NAT - elif PyDateTime_Check(val): - if val.tzinfo is not None: - if found_naive: - raise ValueError('Cannot mix tz-aware with ' - 'tz-naive values') - if inferred_tz is not None: - if not tz_compare(val.tzinfo, inferred_tz): - raise ValueError('Array must be all same time zone') - else: - inferred_tz = 
val.tzinfo - - _ts = convert_datetime_to_tsobject(val, None) - ival = _ts.value - check_dts_bounds(&_ts.dts) - else: - found_naive = True - if inferred_tz is not None: - raise ValueError('Cannot mix tz-aware with ' - 'tz-naive values') - ival = pydatetime_to_dt64(val, &dts) - check_dts_bounds(&dts) - else: - raise TypeError(f'Unrecognized value type: {type(val)}') - - # Analogous to: iresult[i] = ival - (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = ival - - cnp.PyArray_MultiIter_NEXT(mi) - - return result, inferred_tz - - # ---------------------------------------------------------------------- # _TSObject Conversion diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index dadfad394b903..ec6da61bde6c6 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2263,14 +2263,6 @@ def objects_to_datetime64ns( allow_mixed=allow_mixed, ) result = result.reshape(data.shape, order=order) - except ValueError as err: - try: - values, tz_parsed = conversion.datetime_to_datetime64(data) - # If tzaware, these values represent unix timestamps, so we - # return them as i8 to distinguish from wall times - return values.view("i8"), tz_parsed - except (ValueError, TypeError): - raise err except OverflowError as err: # Exception is raised when a part of date is greater than 32 bit signed int raise OutOfBoundsDatetime("Out of bounds nanosecond timestamp") from err diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index d231dc10d1004..d6dda373bdf92 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -24,7 +24,6 @@ OutOfBoundsDatetime, Timedelta, Timestamp, - conversion, iNaT, nat_strings, parsing, @@ -41,6 +40,7 @@ ArrayLike, DateTimeErrorChoices, Timezone, + npt, ) from pandas.util._exceptions import find_stack_level @@ -467,8 +467,6 @@ def _array_strptime_with_fallback( try: result, timezones = array_strptime(arg, fmt, exact=exact, errors=errors) - if "%Z" in fmt or 
"%z" in fmt: - return _return_parsed_timezone_results(result, timezones, tz, name) except OutOfBoundsDatetime: if errors == "raise": raise @@ -494,6 +492,9 @@ def _array_strptime_with_fallback( else: # Indicates to the caller to fallback to objects_to_datetime64ns return None + else: + if "%Z" in fmt or "%z" in fmt: + return _return_parsed_timezone_results(result, timezones, tz, name) return _box_as_indexlike(result, utc=utc, name=name) @@ -512,38 +513,28 @@ def _to_datetime_with_format( Try parsing with the given format, returning None on failure. """ result = None - try: - # shortcut formatting here - if fmt == "%Y%m%d": - # pass orig_arg as float-dtype may have been converted to - # datetime64[ns] - orig_arg = ensure_object(orig_arg) - try: - # may return None without raising - result = _attempt_YYYYMMDD(orig_arg, errors=errors) - except (ValueError, TypeError, OutOfBoundsDatetime) as err: - raise ValueError( - "cannot convert the input to '%Y%m%d' date format" - ) from err - if result is not None: - utc = tz == "utc" - return _box_as_indexlike(result, utc=utc, name=name) - # fallback - res = _array_strptime_with_fallback( - arg, name, tz, fmt, exact, errors, infer_datetime_format - ) - return res - - except ValueError as err: - # Fallback to try to convert datetime objects if timezone-aware - # datetime objects are found without passing `utc=True` + # shortcut formatting here + if fmt == "%Y%m%d": + # pass orig_arg as float-dtype may have been converted to + # datetime64[ns] + orig_arg = ensure_object(orig_arg) try: - values, tz = conversion.datetime_to_datetime64(arg) - dta = DatetimeArray(values, dtype=tz_to_dtype(tz)) - return DatetimeIndex._simple_new(dta, name=name) - except (ValueError, TypeError): - raise err + # may return None without raising + result = _attempt_YYYYMMDD(orig_arg, errors=errors) + except (ValueError, TypeError, OutOfBoundsDatetime) as err: + raise ValueError( + "cannot convert the input to '%Y%m%d' date format" + ) from err + if result 
is not None: + utc = tz == "utc" + return _box_as_indexlike(result, utc=utc, name=name) + + # fallback + res = _array_strptime_with_fallback( + arg, name, tz, fmt, exact, errors, infer_datetime_format + ) + return res def _to_datetime_with_unit(arg, unit, name, tz, errors: str) -> Index: @@ -1007,17 +998,6 @@ def to_datetime( DatetimeIndex(['2020-01-01 01:00:00-01:00', '2020-01-01 02:00:00-01:00'], dtype='datetime64[ns, pytz.FixedOffset(-60)]', freq=None) - - Finally, mixing timezone-aware strings and :class:`datetime.datetime` always - raises an error, even if the elements all have the same time offset. - - >>> from datetime import datetime, timezone, timedelta - >>> d = datetime(2020, 1, 1, 18, tzinfo=timezone(-timedelta(hours=1))) - >>> pd.to_datetime(["2020-01-01 17:00 -0100", d]) - Traceback (most recent call last): - ... - ValueError: Tz-aware datetime.datetime cannot be converted to datetime64 - unless utc=True - | Setting ``utc=True`` solves most of the above issues: @@ -1243,7 +1223,7 @@ def coerce(values): return values -def _attempt_YYYYMMDD(arg: np.ndarray, errors: str) -> np.ndarray | None: +def _attempt_YYYYMMDD(arg: npt.NDArray[np.object_], errors: str) -> np.ndarray | None: """ try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like, arg is a passed in as an object dtype, but could really be ints/strings @@ -1257,7 +1237,7 @@ def _attempt_YYYYMMDD(arg: np.ndarray, errors: str) -> np.ndarray | None: def calc(carg): # calculate the actual result - carg = carg.astype(object) + carg = carg.astype(object, copy=False) parsed = parsing.try_parse_year_month_day( carg / 10000, carg / 100 % 100, carg % 100 ) diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 7597d4345cfce..0bd93a78227ff 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -5,6 +5,7 @@ from datetime import ( datetime, timedelta, + timezone, ) from decimal import Decimal import locale @@ 
-455,6 +456,14 @@ def test_to_datetime_parse_timezone_keeps_name(self): class TestToDatetime: + def test_to_datetime_mixed_datetime_and_string(self): + # GH#47018 adapted old doctest with new behavior + d1 = datetime(2020, 1, 1, 17, tzinfo=timezone(-timedelta(hours=1))) + d2 = datetime(2020, 1, 1, 18, tzinfo=timezone(-timedelta(hours=1))) + res = to_datetime(["2020-01-01 17:00 -0100", d2]) + expected = to_datetime([d1, d2]).tz_convert(pytz.FixedOffset(-60)) + tm.assert_index_equal(res, expected) + def test_to_datetime_np_str(self): # GH#32264 value = np.str_("2019-02-04 10:18:46.297000+0000")
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47018
2022-05-13T20:51:26Z
2022-05-21T19:47:14Z
2022-05-21T19:47:14Z
2022-05-21T21:53:29Z
Unpin xarray
diff --git a/environment.yml b/environment.yml index b4710e252384c..df69e654018fb 100644 --- a/environment.yml +++ b/environment.yml @@ -116,7 +116,7 @@ dependencies: - fsspec>=0.7.4 # for generic remote file operations - gcsfs>=0.6.0 # file IO when using 'gcs://...' path - sqlalchemy # pandas.read_sql, DataFrame.to_sql - - xarray<0.19 # DataFrame.to_xarray + - xarray # DataFrame.to_xarray - cftime # Needed for downstream xarray.CFTimeIndex test - pyreadstat # pandas.read_spss - tabulate>=0.8.3 # DataFrame.to_markdown diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e1459a66a0f12..24c3bcb7bf669 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3074,7 +3074,7 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal' >>> df_multiindex.to_xarray() <xarray.Dataset> - Dimensions: (animal: 2, date: 2) + Dimensions: (date: 2, animal: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' diff --git a/requirements-dev.txt b/requirements-dev.txt index 0f1d76b996df1..36d3720df34d8 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -80,7 +80,7 @@ aiobotocore<2.0.0 fsspec>=0.7.4 gcsfs>=0.6.0 sqlalchemy -xarray<0.19 +xarray cftime pyreadstat tabulate>=0.8.3
- [X] closes [#42716](https://github.com/pandas-dev/pandas/issues/42716) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47016
2022-05-13T20:02:44Z
2022-05-16T09:23:03Z
2022-05-16T09:23:02Z
2022-05-16T09:23:03Z
CI: Ensure no-use-pep517 with no-build-isolation with new pip version
diff --git a/ci/setup_env.sh b/ci/setup_env.sh index 483353cfcb3cd..80448319f7918 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -104,6 +104,6 @@ echo "Build extensions" python setup.py build_ext -q -j3 echo "Install pandas" -python -m pip install --no-build-isolation -e . +python -m pip install --no-build-isolation --no-use-pep517 -e . echo "done"
xref https://github.com/scipy/oldest-supported-numpy/issues/53 We may even be able to replace no-build-isolation with no-use-pep517 https://github.com/scikit-learn/scikit-learn/pull/23339 but that could be a followup.
https://api.github.com/repos/pandas-dev/pandas/pulls/47015
2022-05-13T16:21:34Z
2022-05-15T13:49:48Z
2022-05-15T13:49:48Z
2022-05-15T13:54:31Z
DOC: pyarrow >= 8.0.0 supports timedelta
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 4ed71913d7b4d..5228ef9f3c5b6 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5470,7 +5470,7 @@ See the documentation for `pyarrow <https://arrow.apache.org/docs/python/>`__ an .. note:: These engines are very similar and should read/write nearly identical parquet format files. - Currently ``pyarrow`` does not support timedelta data, ``fastparquet>=0.1.4`` supports timezone aware datetimes. + ``pyarrow>=8.0.0`` supports timedelta data, ``fastparquet>=0.1.4`` supports timezone aware datetimes. These libraries differ by having different underlying dependencies (``fastparquet`` by using ``numba``, while ``pyarrow`` uses a c-library). .. ipython:: python
8.0.0 was released recently with timedelta support: https://arrow.apache.org/release/8.0.0.html, https://issues.apache.org/jira/browse/ARROW-6780
https://api.github.com/repos/pandas-dev/pandas/pulls/47012
2022-05-13T11:30:54Z
2022-05-15T02:57:45Z
2022-05-15T02:57:45Z
2022-05-15T02:57:50Z
TYP: resolve mypy ingores in core/indexing.py
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 3418f1cab0e6f..06b93622d3ca6 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -627,7 +627,7 @@ def iat(self) -> _iAtIndexer: class _LocationIndexer(NDFrameIndexerBase): _valid_types: str - axis = None + axis: int | None = None @final def __call__(self, axis=None): @@ -652,7 +652,7 @@ def _get_setitem_indexer(self, key): check_deprecated_indexers(x) if self.axis is not None: - key = self._tupleize_axis_indexer(key) + key = _tupleize_axis_indexer(self.ndim, self.axis, key) ax = self.obj._get_axis(0) @@ -737,17 +737,6 @@ def _maybe_mask_setitem_value(self, indexer, value): return indexer, value - @final - def _tupleize_axis_indexer(self, key) -> tuple: - """ - If we have an axis, adapt the given key to be axis-independent. - """ - new_key = [slice(None)] * self.ndim - # error: Invalid index type "Optional[Any]" for "List[slice]"; expected - # type "SupportsIndex" - new_key[self.axis] = key # type: ignore[index] - return tuple(new_key) - @final def _ensure_listlike_indexer(self, key, axis=None, value=None): """ @@ -1621,7 +1610,7 @@ def _get_setitem_indexer(self, key): key = list(key) if self.axis is not None: - key = self._tupleize_axis_indexer(key) + key = _tupleize_axis_indexer(self.ndim, self.axis, key) return key @@ -2137,13 +2126,11 @@ def _ensure_iterable_column_indexer(self, column_indexer): """ Ensure that our column indexer is something that can be iterated over. 
""" - ilocs: Sequence[int] + ilocs: Sequence[int] | np.ndarray if is_integer(column_indexer): ilocs = [column_indexer] elif isinstance(column_indexer, slice): - ilocs = np.arange(len(self.obj.columns))[ # type: ignore[assignment] - column_indexer - ] + ilocs = np.arange(len(self.obj.columns))[column_indexer] elif isinstance(column_indexer, np.ndarray) and is_bool_dtype( column_indexer.dtype ): @@ -2201,18 +2188,16 @@ def ravel(i): # TODO: This is hacky, align Series and DataFrame behavior GH#45778 if obj.ndim == 2 and is_empty_indexer(indexer[0]): return ser._values.copy() - ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values + ser_values = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values # single indexer if len(indexer) > 1 and not multiindex_indexer: len_indexer = len(indexer[1]) - ser = ( - np.tile(ser, len_indexer) # type: ignore[assignment] - .reshape(len_indexer, -1) - .T + ser_values = ( + np.tile(ser_values, len_indexer).reshape(len_indexer, -1).T ) - return ser + return ser_values for i, idx in enumerate(indexer): ax = obj.axes[i] @@ -2428,6 +2413,15 @@ def _tuplify(ndim: int, loc: Hashable) -> tuple[Hashable | slice, ...]: return tuple(_tup) +def _tupleize_axis_indexer(ndim: int, axis: int, key) -> tuple: + """ + If we have an axis, adapt the given key to be axis-independent. + """ + new_key = [slice(None)] * ndim + new_key[axis] = key + return tuple(new_key) + + def convert_to_index_sliceable(obj: DataFrame, key): """ If we are index sliceable, then return my slicer, otherwise return None.
xref https://github.com/pandas-dev/pandas/issues/37715 Notes: `axis` has to be passed as an argument to `_tupleize_axis_indexer` in order to assert that it is not `None`. Since this kind of decouples it from the `_LocationIndexer` class, I made it a standalone utility function.
https://api.github.com/repos/pandas-dev/pandas/pulls/47010
2022-05-12T22:25:21Z
2022-05-25T22:45:24Z
2022-05-25T22:45:24Z
2022-05-25T22:45:31Z
Backport PR #46981 on branch 1.4.x (CI: Move MacOS build from Azure to GHA)
diff --git a/.github/workflows/windows.yml b/.github/workflows/macos-windows.yml similarity index 70% rename from .github/workflows/windows.yml rename to .github/workflows/macos-windows.yml index 6f267357554a3..560a421ec74ec 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/macos-windows.yml @@ -1,4 +1,4 @@ -name: Windows +name: Windows-MacOS on: push: @@ -21,18 +21,20 @@ env: jobs: pytest: - runs-on: windows-latest defaults: run: shell: bash -el {0} timeout-minutes: 90 strategy: matrix: + os: [macos-latest, windows-latest] env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml] fail-fast: false + runs-on: ${{ matrix.os }} + name: ${{ format('{0} {1}', matrix.os, matrix.env_file) }} concurrency: # https://github.community/t/concurrecy-not-work-for-push/183068/7 - group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-windows + group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-${{ matrix.os }} cancel-in-progress: true steps: @@ -47,10 +49,17 @@ jobs: mamba-version: "*" channels: conda-forge activate-environment: pandas-dev - channel-priority: strict + channel-priority: ${{ matrix.os == 'macos-latest' && 'flexible' || 'strict' }} environment-file: ci/deps/${{ matrix.env_file }} use-only-tar-bz2: true + # ImportError: 2): Library not loaded: @rpath/libssl.1.1.dylib + # Referenced from: /Users/runner/miniconda3/envs/pandas-dev/lib/libthrift.0.13.0.dylib + # Reason: image not found + - name: Upgrade pyarrow on MacOS + run: conda install -n pandas-dev -c conda-forge --no-update-deps pyarrow=6 + if: ${{ matrix.os == 'macos-latest' }} + - name: Build Pandas uses: ./.github/actions/build_pandas diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0b2a9f5b2b0cd..0c6195ff6924b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -22,11 +22,6 @@ variables: PANDAS_CI: 1 jobs: -- template: ci/azure/posix.yml - parameters: - name: macOS - 
vmImage: macOS-10.15 - - job: py38_32bit pool: vmImage: ubuntu-18.04 diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml deleted file mode 100644 index df1d5049be33d..0000000000000 --- a/ci/azure/posix.yml +++ /dev/null @@ -1,50 +0,0 @@ -parameters: - name: '' - vmImage: '' - -jobs: -- job: ${{ parameters.name }} - timeoutInMinutes: 90 - pool: - vmImage: ${{ parameters.vmImage }} - strategy: - matrix: - py38: - ENV_FILE: ci/deps/actions-38.yaml - CONDA_PY: "38" - - py39: - ENV_FILE: ci/deps/actions-39.yaml - CONDA_PY: "39" - - py310: - ENV_FILE: ci/deps/actions-310.yaml - CONDA_PY: "310" - - steps: - - script: echo '##vso[task.prependpath]$(HOME)/miniconda3/bin' - displayName: 'Set conda path' - - - script: rm /usr/local/miniconda/pkgs/cache/*.json - displayName: 'Workaround for mamba-org/mamba#488' - - - script: ci/setup_env.sh - displayName: 'Setup environment and build pandas' - - - script: | - conda run -n pandas-dev --no-capture-output ci/run_tests.sh - displayName: 'Test' - - - script: | - pushd /tmp - conda run -n pandas-dev python -c "import pandas; pandas.show_versions()" - popd - displayName: 'Build versions' - - - task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - failTaskOnFailedTests: true - testResultsFiles: 'test-data.xml' - testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }} - displayName: 'Publish test results' diff --git a/ci/setup_env.sh b/ci/setup_env.sh index a85767eb6f1b4..483353cfcb3cd 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -73,15 +73,6 @@ mamba install -n pandas-dev 'setuptools<60' echo "conda list -n pandas-dev" conda list -n pandas-dev -# From pyarrow on MacOS -# ImportError: 2): Library not loaded: @rpath/libssl.1.1.dylib -# Referenced from: /Users/runner/miniconda3/envs/pandas-dev/lib/libthrift.0.13.0.dylib -# Reason: image not found -if [[ "$(uname)" == 'Darwin' ]]; then - echo "Update pyarrow for pyarrow on MacOS" - conda install -n pandas-dev -c conda-forge --no-update-deps pyarrow=6 
-fi - if [[ "$BITS32" == "yes" ]]; then # activate 32-bit compiler export CONDA_BUILD=1
Backport PR #46981: CI: Move MacOS build from Azure to GHA
https://api.github.com/repos/pandas-dev/pandas/pulls/47007
2022-05-12T15:55:05Z
2022-05-15T04:32:06Z
2022-05-15T04:32:06Z
2022-05-15T04:32:06Z
ENH: do not sort resulting columns when sort=False
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 128fd68674f96..5111ffbda14f9 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -745,6 +745,7 @@ Reshaping - Bug in concanenation with ``IntegerDtype``, or ``FloatingDtype`` arrays where the resulting dtype did not mirror the behavior of the non-nullable dtypes (:issue:`46379`) - Bug in :func:`concat` with identical key leads to error when indexing :class:`MultiIndex` (:issue:`46519`) - Bug in :meth:`DataFrame.join` with a list when using suffixes to join DataFrames with duplicate column names (:issue:`46396`) +- Bug in :meth:`DataFrame.pivot_table` with ``sort=False`` results in sorted index (:issue:`17041`) - Sparse diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index b428155e722ff..8c861c199169b 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -216,7 +216,7 @@ def __internal_pivot_table( ) table = table.reindex(m, axis=1) - if isinstance(table, ABCDataFrame): + if sort is True and isinstance(table, ABCDataFrame): table = table.sort_index(axis=1) if fill_value is not None: diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 31f720b9ec336..3471c83638126 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -2167,6 +2167,28 @@ def test_pivot_table_sort_false(self): ) tm.assert_frame_equal(result, expected) + def test_pivot_table_sort_false_with_multiple_values(self): + df = DataFrame( + { + "firstname": ["John", "Michael"], + "lastname": ["Foo", "Bar"], + "height": [173, 182], + "age": [47, 33], + } + ) + result = df.pivot_table( + index=["lastname", "firstname"], values=["height", "age"], sort=False + ) + expected = DataFrame( + [[173, 47], [182, 33]], + columns=["height", "age"], + index=MultiIndex.from_tuples( + [("Foo", "John"), ("Bar", "Michael")], + names=["lastname", "firstname"], + ), + ) + 
tm.assert_frame_equal(result, expected) + def test_pivot_table_with_margins_and_numeric_columns(self): # GH 26568 df = DataFrame([["a", "x", 1], ["a", "y", 2], ["b", "y", 3], ["b", "z", 4]])
- [x] closes #17041 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Currently, `pivot_table` sorts the resulting columns even when `sort=False`. For example, the following code snippet produces the column ["age", "height"] in alphabetical order instead of the order in the original `df` or passed by `values=`. ```py import pandas as pd df = pd.DataFrame( { "firstname": ["John", "Michael"], "lastname": ["Foo", "Bar"], "height": [173, 182], "age": [47, 33], } ) result = df.pivot_table(index=["lastname", "firstname"], values=["height", "age"], sort=False) print(result) ``` ``` age height lastname firstname Foo John 47 173 Bar Michael 33 182 ``` This PR fixes this issue by not sorting the resulting columns when `sort=False` FYI, #17041 has some discussion about the order of the columns. PR #40954 added the keyword `sort` to `pivot_table` and was merged in v1.3.
https://api.github.com/repos/pandas-dev/pandas/pulls/46994
2022-05-11T06:10:41Z
2022-05-19T14:36:21Z
2022-05-19T14:36:20Z
2022-05-19T17:34:26Z
CLN: DatetimeTZBlock don't override values_for_json
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 3836f3e6540b4..421fac4ea767b 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1989,11 +1989,9 @@ class DatetimeTZBlock(DatetimeLikeBlock): _validate_ndim = True _can_consolidate = False - def values_for_json(self) -> np.ndarray: - # force dt64tz to go through object dtype - # tz info will be lost when converting to - # dt64 which is naive - return self.values.astype(object) + # Don't use values_for_json from DatetimeLikeBlock since it is + # an invalid optimization here(drop the tz) + values_for_json = NDArrayBackedExtensionBlock.values_for_json class ObjectBlock(NumpyBlock):
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/46993
2022-05-11T01:27:21Z
2022-06-06T19:25:29Z
2022-06-06T19:25:29Z
2022-06-06T19:25:36Z
Backport PR #46991 on branch 1.4.x (CI/TST: Fix test for pyarrow 8.0 release)
diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py index 00b205d018e89..eef2bb6639c36 100644 --- a/pandas/compat/pyarrow.py +++ b/pandas/compat/pyarrow.py @@ -14,6 +14,7 @@ pa_version_under5p0 = _palv < Version("5.0.0") pa_version_under6p0 = _palv < Version("6.0.0") pa_version_under7p0 = _palv < Version("7.0.0") + pa_version_under8p0 = _palv < Version("8.0.0") except ImportError: pa_version_under1p01 = True pa_version_under2p0 = True @@ -22,3 +23,4 @@ pa_version_under5p0 = True pa_version_under6p0 = True pa_version_under7p0 = True + pa_version_under8p0 = True diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index d2a3a2eebef02..3df59a2eeef1f 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -17,6 +17,7 @@ pa_version_under2p0, pa_version_under5p0, pa_version_under6p0, + pa_version_under8p0, ) import pandas.util._test_decorators as td @@ -717,11 +718,14 @@ def test_duplicate_columns(self, pa): df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy() self.check_error_on_write(df, pa, ValueError, "Duplicate column names found") - def test_unsupported(self, pa): - # timedelta + def test_timedelta(self, pa): df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)}) - self.check_external_error_on_write(df, pa, NotImplementedError) + if pa_version_under8p0: + self.check_external_error_on_write(df, pa, NotImplementedError) + else: + check_round_trip(df, pa) + def test_unsupported(self, pa): # mixed python objects df = pd.DataFrame({"a": ["a", 1, 2.0]}) # pyarrow 0.11 raises ArrowTypeError
Backport PR #46991: CI/TST: Fix test for pyarrow 8.0 release
https://api.github.com/repos/pandas-dev/pandas/pulls/46992
2022-05-11T01:11:08Z
2022-05-11T12:07:07Z
2022-05-11T12:07:07Z
2022-05-11T12:07:08Z
CI/TST: Fix test for pyarrow 8.0 release
diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py index 00b205d018e89..eef2bb6639c36 100644 --- a/pandas/compat/pyarrow.py +++ b/pandas/compat/pyarrow.py @@ -14,6 +14,7 @@ pa_version_under5p0 = _palv < Version("5.0.0") pa_version_under6p0 = _palv < Version("6.0.0") pa_version_under7p0 = _palv < Version("7.0.0") + pa_version_under8p0 = _palv < Version("8.0.0") except ImportError: pa_version_under1p01 = True pa_version_under2p0 = True @@ -22,3 +23,4 @@ pa_version_under5p0 = True pa_version_under6p0 = True pa_version_under7p0 = True + pa_version_under8p0 = True diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 7c04a51e803f6..5b899079dfffd 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -18,6 +18,7 @@ pa_version_under2p0, pa_version_under5p0, pa_version_under6p0, + pa_version_under8p0, ) import pandas.util._test_decorators as td @@ -718,11 +719,14 @@ def test_duplicate_columns(self, pa): df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy() self.check_error_on_write(df, pa, ValueError, "Duplicate column names found") - def test_unsupported(self, pa): - # timedelta + def test_timedelta(self, pa): df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)}) - self.check_external_error_on_write(df, pa, NotImplementedError) + if pa_version_under8p0: + self.check_external_error_on_write(df, pa, NotImplementedError) + else: + check_round_trip(df, pa) + def test_unsupported(self, pa): # mixed python objects df = pd.DataFrame({"a": ["a", 1, 2.0]}) # pyarrow 0.11 raises ArrowTypeError
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/46991
2022-05-10T22:30:11Z
2022-05-11T01:10:27Z
2022-05-11T01:10:27Z
2022-05-11T01:16:22Z